diff --git a/.editorconfig b/.editorconfig index d7d9cae..027bdb1 100644 --- a/.editorconfig +++ b/.editorconfig
@@ -11,3 +11,4 @@ charset = utf-8 indent_style = space indent_size = 4 +max_line_length = 88
diff --git a/.gitignore b/.gitignore index 767654b..b521867 100644 --- a/.gitignore +++ b/.gitignore
@@ -13,6 +13,7 @@ *.egg-info/ /build/ /.venv +/.mypy_cache # # Editors @@ -20,6 +21,7 @@ /.idea/ /.vscode/ +*~ # # antlion @@ -35,6 +37,7 @@ # Local development scripts /*.sh +!/format.sh # # third_party
diff --git a/BUILD.gn b/BUILD.gn index f2aab56..582d5b1 100644 --- a/BUILD.gn +++ b/BUILD.gn
@@ -9,28 +9,40 @@ import("//build/python/python_library.gni") +assert(is_host, "antlion only supported on the host toolchain") + # Tests for full build validation group("e2e_tests") { testonly = true - public_deps = [ "src/antlion/tests:e2e_tests" ] + public_deps = [ "tests:e2e_tests" ] } # Subset of tests to validate builds in under 15 minutes. group("e2e_tests_quick") { testonly = true - public_deps = [ "src/antlion/tests:e2e_tests_quick" ] + public_deps = [ "tests:e2e_tests_quick" ] } # Tests for at-desk custom validation group("e2e_tests_manual") { testonly = true - public_deps = [ "src/antlion/tests:e2e_tests_manual" ] + public_deps = [ "tests:e2e_tests_manual" ] } -# deprecated: prefer e2e_tests_quick -group("smoke_tests") { +# Tests to validate the netstack in under 15 minutes. +group("e2e_tests_netstack_quick") { testonly = true - public_deps = [ ":e2e_tests_quick" ] + public_deps = [ + "tests/dhcp:dhcpv4_duplicate_address_test", + "tests/dhcp:dhcpv4_interop_basic_test", + "tests/dhcp:dhcpv4_interop_combinatorial_options_test", + "tests/wlan/functional:beacon_loss_test", + "tests/wlan/performance:channel_sweep_test_quick", + + # TODO(http://b/372467106): Uncomment once ToggleWlanInterfaceStressTest is + # updated to use current Fuchsia APIs for removing interfaces. + # "tests/netstack:toggle_wlan_interface_stress_test", + ] } # Unit tests only @@ -40,15 +52,14 @@ } python_library("antlion") { - source_root = "//third_party/antlion/src/antlion" + enable_mypy = false + source_root = "//third_party/antlion/packages/antlion" + testonly = true sources = [ "__init__.py", "base_test.py", - "bin/__init__.py", - "bin/act.py", "capabilities/__init__.py", "capabilities/ssh.py", - "config_parser.py", "context.py", "controllers/__init__.py", "controllers/access_point.py", @@ -61,8 +72,6 @@ "controllers/android_lib/events.py", "controllers/android_lib/logcat.py", "controllers/android_lib/services.py", - "controllers/android_lib/tel/__init__.py", - "controllers/android_lib/tel/tel_utils.py", "controllers/ap_lib/__init__.py", "controllers/ap_lib/ap_get_interface.py", "controllers/ap_lib/ap_iwconfig.py", @@ -81,6 +90,7 @@ "controllers/ap_lib/radvd.py", "controllers/ap_lib/radvd_config.py", "controllers/ap_lib/radvd_constants.py", + "controllers/ap_lib/regulatory_channels.py", "controllers/ap_lib/third_party_ap_profiles/__init__.py", "controllers/ap_lib/third_party_ap_profiles/actiontec.py", "controllers/ap_lib/third_party_ap_profiles/asus.py", @@ -102,26 +112,14 @@ "controllers/fuchsia_device.py", "controllers/fuchsia_lib/__init__.py", "controllers/fuchsia_lib/base_lib.py", - "controllers/fuchsia_lib/device_lib.py", "controllers/fuchsia_lib/ffx.py", - "controllers/fuchsia_lib/hardware_power_statecontrol_lib.py", "controllers/fuchsia_lib/lib_controllers/__init__.py", - "controllers/fuchsia_lib/lib_controllers/netstack_controller.py", "controllers/fuchsia_lib/lib_controllers/wlan_controller.py", "controllers/fuchsia_lib/lib_controllers/wlan_policy_controller.py", - "controllers/fuchsia_lib/location/__init__.py", - "controllers/fuchsia_lib/location/regulatory_region_lib.py", - "controllers/fuchsia_lib/logging_lib.py", - "controllers/fuchsia_lib/netstack/__init__.py", - "controllers/fuchsia_lib/netstack/netstack_lib.py", "controllers/fuchsia_lib/package_server.py", "controllers/fuchsia_lib/sl4f.py", "controllers/fuchsia_lib/ssh.py", - "controllers/fuchsia_lib/utils_lib.py", - "controllers/fuchsia_lib/wlan_ap_policy_lib.py", "controllers/fuchsia_lib/wlan_deprecated_configuration_lib.py", - 
"controllers/fuchsia_lib/wlan_lib.py", - "controllers/fuchsia_lib/wlan_policy_lib.py", "controllers/iperf_client.py", "controllers/iperf_server.py", "controllers/openwrt_ap.py", @@ -131,6 +129,7 @@ "controllers/openwrt_lib/openwrt_constants.py", "controllers/openwrt_lib/wireless_config.py", "controllers/openwrt_lib/wireless_settings_applier.py", + "controllers/packet_capture.py", "controllers/pdu.py", "controllers/pdu_lib/__init__.py", "controllers/pdu_lib/digital_loggers/__init__.py", @@ -145,7 +144,6 @@ "controllers/sl4a_lib/sl4a_manager.py", "controllers/sl4a_lib/sl4a_ports.py", "controllers/sl4a_lib/sl4a_session.py", - "controllers/sl4a_lib/sl4a_types.py", "controllers/sniffer.py", "controllers/sniffer_lib/__init__.py", "controllers/sniffer_lib/local/__init__.py", @@ -154,22 +152,26 @@ "controllers/sniffer_lib/local/tshark.py", "controllers/utils_lib/__init__.py", "controllers/utils_lib/commands/__init__.py", + "controllers/utils_lib/commands/command.py", + "controllers/utils_lib/commands/date.py", "controllers/utils_lib/commands/ip.py", + "controllers/utils_lib/commands/journalctl.py", + "controllers/utils_lib/commands/nmcli.py", + "controllers/utils_lib/commands/pgrep.py", "controllers/utils_lib/commands/route.py", "controllers/utils_lib/commands/shell.py", - "controllers/utils_lib/host_utils.py", + "controllers/utils_lib/commands/tcpdump.py", "controllers/utils_lib/ssh/__init__.py", "controllers/utils_lib/ssh/connection.py", "controllers/utils_lib/ssh/formatter.py", "controllers/utils_lib/ssh/settings.py", - "dict_object.py", + "decorators.py", "error.py", "event/__init__.py", "event/decorators.py", "event/event.py", "event/event_bus.py", "event/event_subscription.py", - "event/subscription_bundle.py", "event/subscription_handle.py", "keys.py", "libs/__init__.py", @@ -188,13 +190,9 @@ "libs/proc/__init__.py", "libs/proc/job.py", "libs/proc/process.py", - "libs/yaml_writer.py", "logger.py", "net.py", - "records.py", - "signals.py", - "test_decorators.py", - "test_runner.py", + "runner.py", "test_utils/__init__.py", "test_utils/abstract_devices/__init__.py", "test_utils/abstract_devices/wlan_device.py", @@ -202,7 +200,6 @@ "test_utils/dhcp/__init__.py", "test_utils/dhcp/base_test.py", "test_utils/fuchsia/__init__.py", - "test_utils/fuchsia/utils.py", "test_utils/fuchsia/wmm_test_cases.py", "test_utils/net/__init__.py", "test_utils/net/connectivity_const.py", @@ -210,19 +207,15 @@ "test_utils/wifi/__init__.py", "test_utils/wifi/base_test.py", "test_utils/wifi/wifi_constants.py", - "test_utils/wifi/wifi_performance_test_utils/__init__.py", - "test_utils/wifi/wifi_performance_test_utils/bokeh_figure.py", - "test_utils/wifi/wifi_performance_test_utils/brcm_utils.py", - "test_utils/wifi/wifi_performance_test_utils/ping_utils.py", - "test_utils/wifi/wifi_performance_test_utils/qcom_utils.py", - "test_utils/wifi/wifi_power_test_utils.py", "test_utils/wifi/wifi_test_utils.py", - "tracelogger.py", + "types.py", "utils.py", + "validation.py", ] library_deps = [ + "third_party/github.com/jd/tenacity", + "//src/testing/end_to_end/honeydew", "//third_party/mobly", "//third_party/pyyaml:yaml", - "third_party/github.com/jd/tenacity", ] }
diff --git a/CHANGELOG.md b/CHANGELOG.md index 248b51f..0c36022 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md
@@ -10,7 +10,19 @@ ## [Unreleased] -## 0.3.0 - 2023-05-17 +[unreleased]: https://fuchsia.googlesource.com/antlion/+/refs/tags/v0.3.0..refs/heads/main + +### Removed + +- [BREAKING CHANGE] Support for Python 3.8, 3.9, and 3.10. The minimum supported +version of Python is now 3.11. If running antlion as part of the Fuchsia tree, +nothing is required; Python 3.11 is vendored with Fuchsia and will be found by +GN. If running antlion out of tree, ensure your Python version is at least 3.11. +- `WlanRvrTest` user params `debug_pre_traffic_cmd` and `debug_post_traffic_cmd` + +## [0.3.0] - 2023-05-17 + +[0.3.0]: https://fuchsia.googlesource.com/antlion/+/refs/tags/v0.2.0..refs/tags/v0.3.0 ### Deprecated @@ -23,16 +35,16 @@ ### Added -- Presubmit testing in [CV][CV] (aka CQ). All tests specified with the -`qemu_env` environment will run before every antlion CL is submitted. -- Postsubmit testing in [CI][CI]. See [Milo][builders] for an exhaustive list of -builders. -- [EditorConfig](https://editorconfig.org) file for consistent coding styles. +- Presubmit testing in [CV] (aka CQ). All tests specified with the `qemu_env` +environment will run before every antlion CL is submitted. +- Postsubmit testing in [CI]. See [Milo] for an exhaustive list of builders. +- [EditorConfig] file for consistent coding styles. Installing an EditorConfig plugin for your editor is highly recommended. [CV]: https://chromium.googlesource.com/infra/luci/luci-go/+/refs/heads/main/cv/README.md [CI]: https://chromium.googlesource.com/chromium/src/+/master/docs/tour_of_luci_ui.md -[builders]: https://luci-milo.appspot.com/ui/search?q=antlion +[Milo]: https://luci-milo.appspot.com/ui/search?q=antlion +[EditorConfig]: https://editorconfig.org ### Changed @@ -53,7 +65,7 @@ - Unused controllers and tests (full list) ### Fixed -[unreleased]: https://fuchsia.googlesource.com/antlion/+/refs/tags/v0.2.0..refs/heads/main + - Failure to stop session_manager using ffx in `WlanRebootTest` ([@patricklu], [bug](http://b/267330535)) - Failure to parse 'test_name' in DHCP configuration file in `Dhcpv4InteropTest` @@ -65,6 +77,8 @@ ## [0.2.0] - 2023-01-03 +[0.2.0]: https://fuchsia.googlesource.com/antlion/+/refs/tags/v0.1.0..refs/tags/v0.2.0 + ### Added - Added snapshots before reboot and during test teardown in `WlanRebootTest` @@ -77,12 +91,12 @@ - All path config options in `FuchsiaDevice` expand the home directory (`~`) and environmental variables - - Used by `ssh_priv_key`, `authorized_file_loc`, and `ffx_binary_path` for - sensible defaults using `$FUCHSIA_DIR` + - Used by `ssh_priv_key`, `authorized_file_loc`, and `ffx_binary_path` for + sensible defaults using `$FUCHSIA_DIR` - Running tests works out of the box without specifying `--testpaths` - - Moved `tests` and `unit_tests` to the `antlion` package, enabling - straight-forward packaging of tests. - - Merged `antlion` and `antlion_contrib` packages + - Moved `tests` and `unit_tests` to the `antlion` package, enabling + straight-forward packaging of tests. + - Merged `antlion` and `antlion_contrib` packages - Converted several required dependencies to optional dependencies: - `bokeh` is only needed for producing HTML graphing. 
If this feature is desired, install antlion with the bokeh option: `pip install ".[bokeh]"` @@ -102,19 +116,19 @@ - Failure to acquire IPv6 address in `WlanRebootTest` ([bug](http://b/256009189)) - Typo in `ChannelSweepTest` preventing use of iPerf ([@patricklu]) - "Country code never updated" error affecting all Fuchsia ToT builds -([@karlward], [bug](https://fxbug.dev/116500)) +([@karlward], [bug](https://fxbug.dev/42067674)) - Parsing new stderr format from `ffx component destroy` ([@karlward], -[bug](https://fxbug.dev/116544)) +[bug](https://fxbug.dev/42067722)) - "Socket operation on non-socket" error during initialization of ffx on MacOS -([@karlward], [bug](https://fxbug.dev/116626)) +([@karlward], [bug](https://fxbug.dev/42067812)) - Python 3.8 support for IPv6 scope IDs ([bug](http://b/261746355)) -[0.2.0]: https://fuchsia.googlesource.com/antlion/+/refs/tags/v0.1.0..refs/tags/v0.2.0 - ## [0.1.0] - 2022-11-28 Forked from ACTS with the following changes +[0.1.0]: https://fuchsia.googlesource.com/antlion/+/refs/tags/v0.1.0 + ### Added - A modern approach to installation using `pyproject.toml` via `pip install .` @@ -125,6 +139,8 @@ - Package and import names from ACTS to antlion - Copyright notice from AOSP to Fuchsia Authors +[src-layout]: https://setuptools.pypa.io/en/latest/userguide/package_discovery.html#src-layout + ### Deprecated - Use of the `setup.py` script. This is only used to keep infrastructure @@ -143,9 +159,6 @@ - KeyError for 'mac_addr' in WlanDeprecatedConfigurationTest ([@sakuma], [bug](http://b/237709921)) -[0.1.0]: https://fuchsia.googlesource.com/antlion/+/refs/tags/v0.1.0 -[src-layout]: https://setuptools.pypa.io/en/latest/userguide/package_discovery.html#src-layout - [@sakuma]: https://fuchsia-review.git.corp.google.com/q/owner:sakuma%2540google.com [@patricklu]: https://fuchsia-review.git.corp.google.com/q/owner:patricklu%2540google.com [@karlward]: https://fuchsia-review.git.corp.google.com/q/owner:karlward%2540google.com
diff --git a/MANIFEST.in b/MANIFEST.in index a8ad1bb..a6caf7f 100644 --- a/MANIFEST.in +++ b/MANIFEST.in
@@ -1,4 +1,4 @@ include setup.py README.md -recursive-include src/antlion * +recursive-include packages/antlion * global-exclude .DS_Store global-exclude *.pyc
diff --git a/README.md b/README.md index 7d5950b..74c5a6d 100644 --- a/README.md +++ b/README.md
@@ -7,7 +7,7 @@ [TOC] -[Docs]: http://go/fxca +[Docs]: http://go/antlion [Report Bug]: http://go/conn-test-bug [Request Feature]: http://b/issues/new?component=1182297&template=1680893 @@ -17,7 +17,7 @@ enables antlion tests that do not require hardware-specific capabilities like WLAN. This is especially useful to verify if antlion builds and runs without syntax errors. If you require WLAN capabilities, see -[below](#running-with-a-physical-device). +[below](#running-with-a-local-physical-device). 1. [Checkout Fuchsia](https://fuchsia.dev/fuchsia-src/get-started/get_fuchsia_source) @@ -47,7 +47,7 @@ 5. Run an antlion test ```sh - fx test --e2e --output //third_party/antlion/src/antlion/tests/examples:sl4f_sanity_test + fx test --e2e --output //third_party/antlion/tests/examples:sl4f_sanity_test ``` ## Running with a local physical device @@ -81,7 +81,7 @@ 4. Run an antlion test ```sh - fx test --e2e --output //third_party/antlion/src/antlion/tests/functional:ping_stress_test + fx test --e2e --output //third_party/antlion/tests/functional:ping_stress_test ``` > Local auxiliary devices are not yet supported by `antlion-runner`, which is @@ -91,7 +91,7 @@ ## Running without a Fuchsia checkout -Requires Python 3.8+ +Requires Python 3.11+ 1. Clone the repo @@ -135,7 +135,7 @@ 4. Run the sanity test ```sh - python src/antlion/tests/examples/Sl4fSanityTest.py -c simple-config.yaml + python tests/examples/Sl4fSanityTest.py -c simple-config.yaml ``` ## Contributing @@ -155,9 +155,6 @@ - Install an [EditorConfig](https://editorconfig.org/) plugin for consistent whitespace -- Install [Black](https://pypi.org/project/black/) our preferred code formatter. - Optionally, add the extension to your editor. - - Complete the steps in '[Contribute source changes]' to gain authorization to upload CLs to Fuchsia's Gerrit. @@ -166,12 +163,19 @@ 1. Create a branch (`git checkout -b feature/amazing-feature`) 2. Make changes 3. Document the changes in `CHANGELOG.md` -4. Run your change through `Black` formatter -5. Commit changes (`git add . && git commit -m 'Add some amazing feature'`) -6. Upload CL (`git push origin HEAD:refs/for/main`) +4. Auto-format changes (`./format.sh`) + + > Note: antlion follows the [Black code style] (rather than the + > [Google Python Style Guide]) + +5. Verify no typing errors (`mypy .`) +6. Commit changes (`git add . && git commit -m 'Add some amazing feature'`) +7. Upload CL (`git push origin HEAD:refs/for/main`) > A public bug tracker is not (yet) available. +[Black code style]: https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html +[Google Python Style Guide]: https://google.github.io/styleguide/pyguide.html [Contribute source changes]: https://fuchsia.dev/fuchsia-src/development/source_code/contribute_changes#prerequisites ### Recommended git aliases
diff --git a/antlion_host_test.gni b/antlion_host_test.gni index 96f7654..5593226 100644 --- a/antlion_host_test.gni +++ b/antlion_host_test.gni
@@ -1,3 +1,8 @@ +# Copyright 2024 The Fuchsia Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build/host.gni") import("//build/python/python_binary.gni") import("//build/rust/rustc_binary.gni") import("//build/testing/host_test.gni") @@ -5,12 +10,18 @@ # Declares a host-side antlion test. # -# Example +# Examples # # ``` -# antlion_host_test("Sl4fSanityTest") { +# antlion_host_test("sl4f_sanity_test") { # main_source = "Sl4fSanityTest.py" # } +# +# antlion_host_test("wlan_rvr_test_2g") { +# main_source = "WlanRvrTest.py" +# test_params = "rvr_settings.yaml" +# test_cases = [ "test_rvr_11n_2g_*" ] +# } # ``` # # Parameters @@ -29,10 +40,15 @@ # to the test in the antlion config under the "test_params" key. # Type: string # -# extra_args (optional) -# Additional arguments to pass to the test. +# test_cases (optional) +# List of test cases to run. Defaults to running all test cases. # Type: list(string) # +# test_data_deps (optional) +# List of test data GN targets that are needed at runtime. +# Type: list(string) +# Default: empty list +# # deps # environments # visibility @@ -47,12 +63,21 @@ python_binary(_python_binary_target) { forward_variables_from(invoker, [ + "enable_mypy", "main_source", "sources", + "data_sources", + "data_package_name", ]) output_name = _python_binary_name - main_callable = "test_runner.main" # Mobly-specific entry point. + main_callable = "test_runner.main" # Mobly-specific entry point. deps = [ "//third_party/antlion" ] + if (defined(invoker.test_data_deps)) { + deps += invoker.test_data_deps + } + if (defined(invoker.libraries)) { + deps += invoker.libraries + } testonly = true visibility = [ ":*" ] } @@ -82,7 +107,9 @@ host_test_data(_host_test_data_ssh) { testonly = true visibility = [ ":*" ] - sources = [ "//prebuilt/third_party/openssh-portable/${host_os}-${host_cpu}/bin/ssh" ] + sources = [ + "//prebuilt/third_party/openssh-portable/${host_os}-${host_cpu}/bin/ssh", + ] outputs = [ "${_test_dir}/ssh" ] } @@ -119,6 +146,8 @@ [ "environments", "visibility", + "isolated", + "product_bundle", ]) binary_path = "${root_out_dir}/antlion-runner" @@ -132,15 +161,24 @@ rebase_path("${_test_dir}", root_build_dir), "--ffx-binary", rebase_path("${_test_dir}/ffx", root_build_dir), + "--ffx-subtools-search-path", + rebase_path(host_tools_dir, root_build_dir), "--ssh-binary", rebase_path("${_test_dir}/ssh", root_build_dir), ] + if (defined(invoker.test_cases)) { + args += invoker.test_cases + } + + data_deps = [ "//src/developer/ffx:suite_test_data" ] + deps = [ ":${_host_test_data_ffx}", ":${_host_test_data_ssh}", ":${_host_test_data_target}", "//build/python:interpreter", + "//src/testing/end_to_end/honeydew", "//third_party/antlion/runner", ] @@ -152,8 +190,8 @@ deps += [ ":${_host_test_data_test_params}" ] } - if (defined(invoker.extra_args)) { - args += invoker.extra_args + if (defined(invoker.test_data_deps)) { + deps += invoker.test_data_deps } } }
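The `test_params` file referenced by an `antlion_host_test` target ends up in the generated antlion config and is typically surfaced to the test through Mobly's user params. Below is a hedged sketch of how a test might consume such a parameter; the class name and the `rvr_debug` key are illustrative only and do not correspond to targets or keys in the tree.

```python
import logging

from antlion import base_test


class ExampleRvrTest(base_test.AntlionBaseTest):
    def setup_class(self) -> None:
        # Hypothetical key supplied via the "test_params" file; fall back to a
        # default so the test also runs when the key is absent.
        self.debug_enabled: bool = self.user_params.get("rvr_debug", False)

    def test_example(self) -> None:
        if self.debug_enabled:
            logging.info("Running with rvr_debug enabled")


if __name__ == "__main__":
    # Matches the Mobly-specific entry point named by main_callable above.
    from mobly import test_runner

    test_runner.main()
```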
diff --git a/environments.gni b/environments.gni index 2bdfb53..d19b903 100644 --- a/environments.gni +++ b/environments.gni
@@ -2,29 +2,7 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -antlion_astro_env = { - dimensions = { - device_type = "Astro" - pool = "fuchsia.tests.connectivity" - } - tags = [ "antlion" ] -} - -antlion_sherlock_env = { - dimensions = { - device_type = "Sherlock" - pool = "fuchsia.tests.connectivity" - } - tags = [ "antlion" ] -} - -antlion_nelson_env = { - dimensions = { - device_type = "Nelson" - pool = "fuchsia.tests.connectivity" - } - tags = [ "antlion" ] -} +import("//build/testing/environments.gni") astro_ap_env = { dimensions = { @@ -116,26 +94,95 @@ tags = [ "antlion" ] } +nuc11_ap_env = { + dimensions = { + access_points = "1" + device_type = "Intel NUC Kit NUC11TNHv5" + pool = "fuchsia.tests.connectivity" + } + tags = [ "antlion" ] +} + +nuc11_ap_iperf_env = { + dimensions = { + access_points = "1" + device_type = "Intel NUC Kit NUC11TNHv5" + iperf_servers = "1" + pool = "fuchsia.tests.connectivity" + } + tags = [ "antlion" ] +} + +nuc11_ap_iperf_attenuator_env = { + dimensions = { + access_points = "1" + attenuators = "1" + device_type = "Intel NUC Kit NUC11TNHv5" + iperf_servers = "1" + pool = "fuchsia.tests.connectivity" + } + tags = [ "antlion" ] +} + +vim3_ap_env = { + dimensions = { + access_points = "1" + device_type = "Vim3" + pool = "fuchsia.tests.connectivity" + } + tags = [ "antlion" ] +} + +vim3_ap_iperf_env = { + dimensions = { + access_points = "1" + device_type = "Vim3" + iperf_servers = "1" + pool = "fuchsia.tests.connectivity" + } + tags = [ "antlion" ] +} + +vim3_ap_iperf_attenuator_env = { + dimensions = { + access_points = "1" + attenuators = "1" + device_type = "Vim3" + iperf_servers = "1" + pool = "fuchsia.tests.connectivity" + } + tags = [ "antlion" ] +} + +# Display environments supported by antlion. display_envs = [ - antlion_astro_env, - antlion_sherlock_env, - antlion_nelson_env, + astro_env, + sherlock_env, + nelson_env, + nuc11_env, + vim3_env, ] display_ap_envs = [ astro_ap_env, sherlock_ap_env, nelson_ap_env, + nuc11_ap_env, + vim3_ap_env, ] display_ap_iperf_envs = [ astro_ap_iperf_env, sherlock_ap_iperf_env, nelson_ap_iperf_env, + nuc11_ap_iperf_env, + vim3_ap_iperf_env, ] display_ap_iperf_attenuator_envs = [ astro_ap_iperf_attenuator_env, sherlock_ap_iperf_attenuator_env, nelson_ap_iperf_attenuator_env, + nuc11_ap_iperf_attenuator_env, + vim3_ap_iperf_attenuator_env, ]
diff --git a/format.sh b/format.sh new file mode 100755 index 0000000..8ede1f6 --- /dev/null +++ b/format.sh
@@ -0,0 +1,97 @@ +#!/bin/bash + +# Get the directory of this script +SCRIPT_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" + +install_virtual_environment_doc() { + echo "Please install the virtual environment before running format.sh by running" + echo "the following commands:" + echo "" + echo " cd $SCRIPT_DIR" + echo " python3 -m venv .venv" + echo " (source .venv/bin/activate && pip install -e \".[dev]\")" +} + +if [ -f "$SCRIPT_DIR/.venv/bin/activate" ] ; then + source "$SCRIPT_DIR/.venv/bin/activate" +else + echo "" + echo "=====================" + echo "Error: Virtual environment not installed!" + echo "=====================" + echo "" + install_virtual_environment_doc + echo "" + exit 1 +fi + +# Verify expected virtual environment binaries exist to prevent unintentionally running +# different versions from outside the environment. +# +# Note: The virtual environment may exist without the binaries if dependencies weren't installed +# (e.g., running `python3 -m venv .venv` without `pip install -e '.[dev]'`). +find_venv_binary() { + find .venv/bin -name $1 | grep -q . +} + +venv_binaries="autoflake black isort" +all_binaries_found=true + +for binary in $venv_binaries; do + if ! find_venv_binary $binary; then + all_binaries_found=false + echo "Error: $binary not installed in virtual environment" + fi +done + +if ! $all_binaries_found; then + echo "" + install_virtual_environment_doc + echo "" + exit 1 +fi + +# Detect trivial unused code. +# +# Automatic removal is possible, but is considered an unsafe operation. When a +# change hasn't been committed, automatic removal could cause unintended irreversible +# loss of in-progress code. +# +# Note: This cannot detect unused code between modules or packages. For complex unused +# code detection, vulture should be used. +autoflake \ + --quiet \ + --check-diff \ + --remove-duplicate-keys \ + --remove-unused-variables \ + --remove-all-unused-imports \ + --recursive . + +if [ $? -eq 0 ]; then + echo "No unused code found" +else + echo "" + echo "=====================" + echo "Unused code detected!" + echo "=====================" + echo "" + echo "If these changes are trivial, consider running:" + echo "\"autoflake --in-place --remove-unused-variables --remove-all-unused-imports -r .\"" + echo "" + read -p "Run this command to remove all unused code? [y/n] " -n 1 -r + echo "" + echo "" + + if [[ $REPLY =~ ^[Yy]$ ]]; then + autoflake --in-place --remove-unused-variables --remove-all-unused-imports -r . + else + exit 1 + fi +fi + +# Sort imports to avoid bikeshedding. +isort . + +# Format code; also to avoid bikeshedding. +black .
diff --git a/src/antlion/__init__.py b/packages/antlion/__init__.py similarity index 100% rename from src/antlion/__init__.py rename to packages/antlion/__init__.py
diff --git a/packages/antlion/base_test.py b/packages/antlion/base_test.py new file mode 100755 index 0000000..9e539ca --- /dev/null +++ b/packages/antlion/base_test.py
@@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import re +from typing import Callable + +from mobly.base_test import BaseTestClass +from mobly.base_test import Error as MoblyError + + +class AntlionBaseTest(BaseTestClass): + # TODO(https://github.com/google/mobly/issues/887): Remove this once similar + # functionality is merged into Mobly. + def _get_test_methods( + self, test_names: list[str] + ) -> list[tuple[str, Callable[[], None]]]: + """Resolves test method names to bound test methods. + + Args: + test_names: Test method names. + + Returns: + List of tuples containing the test method name and the function implementing + its logic. + + Raises: + MoblyError: test_names does not match any tests. + """ + + test_table: dict[str, Callable[[], None]] = {**self._generated_test_table} + for name, _ in inspect.getmembers(type(self), callable): + if name.startswith("test_"): + test_table[name] = getattr(self, name) + + test_methods: list[tuple[str, Callable[[], None]]] = [] + for test_name in test_names: + if test_name in test_table: + test_methods.append((test_name, test_table[test_name])) + else: + try: + pattern = re.compile(test_name) + except Exception as e: + raise MoblyError( + f'"{test_name}" is not a valid regular expression' + ) from e + for name in test_table: + if pattern.fullmatch(name.strip()): + test_methods.append((name, test_table[name])) + + if len(test_methods) == 0: + all_patterns = '" or "'.join(test_names) + all_tests = "\n - ".join(test_table.keys()) + raise MoblyError( + f"{self.TAG} does not declare any tests matching " + f'"{all_patterns}". Please verify the correctness of ' + f"{self.TAG} test names: \n - {all_tests}" + ) + + return test_methods
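The `_get_test_methods` override resolves each requested name first by exact lookup and then by treating it as a regular expression matched with `fullmatch` against every known test. A small standalone sketch of that selection logic follows; the test names and pattern are illustrative only, not tests that exist in the tree.

```python
import re
from typing import Callable

# Hypothetical test table, mirroring what AntlionBaseTest builds from generated
# tests plus every method whose name starts with "test_".
test_table: dict[str, Callable[[], None]] = {
    "test_rvr_11n_2g_20mhz_open_tx_ttr": lambda: None,
    "test_rvr_11n_2g_20mhz_open_rx_ttr": lambda: None,
    "test_rvr_11ac_5g_80mhz_open_tx_ttr": lambda: None,
}

requested = ["test_rvr_11n_2g_.*"]  # e.g. supplied via the GN test_cases parameter

selected: list[str] = []
for name in requested:
    if name in test_table:
        # Exact names short-circuit the regular expression fallback.
        selected.append(name)
        continue
    pattern = re.compile(name)
    selected.extend(t for t in test_table if pattern.fullmatch(t))

print(selected)  # only the two 2.4 GHz variants match
```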
diff --git a/src/antlion/capabilities/__init__.py b/packages/antlion/capabilities/__init__.py similarity index 100% rename from src/antlion/capabilities/__init__.py rename to packages/antlion/capabilities/__init__.py
diff --git a/packages/antlion/capabilities/ssh.py b/packages/antlion/capabilities/ssh.py new file mode 100644 index 0000000..1dddd55 --- /dev/null +++ b/packages/antlion/capabilities/ssh.py
@@ -0,0 +1,456 @@ +#!/usr/bin/env python3 +# +# Copyright 2023 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import logging +import os +import shlex +import shutil +import signal +import subprocess +import time +from dataclasses import dataclass +from typing import IO, Mapping + +from mobly import logger, signals + +from antlion.net import wait_for_port +from antlion.runner import CalledProcessError, CalledProcessTransportError, Runner +from antlion.types import Json +from antlion.validation import MapValidator + +DEFAULT_SSH_PORT: int = 22 +DEFAULT_SSH_TIMEOUT_SEC: float = 60.0 +DEFAULT_SSH_CONNECT_TIMEOUT_SEC: int = 90 +DEFAULT_SSH_SERVER_ALIVE_INTERVAL: int = 30 +# The default package repository for all components. + + +class SSHResult: + """Result of an SSH command.""" + + def __init__( + self, + process: ( + subprocess.CompletedProcess[bytes] + | subprocess.CompletedProcess[str] + | subprocess.CalledProcessError + ), + ) -> None: + if isinstance(process.stdout, bytes): + self._stdout_bytes = process.stdout + elif isinstance(process.stdout, str): + self._stdout = process.stdout + else: + raise TypeError( + "Expected process.stdout to be either bytes or str, " + f"got {type(process.stdout)}" + ) + + if isinstance(process.stderr, bytes): + self._stderr_bytes = process.stderr + elif isinstance(process.stderr, str): + self._stderr = process.stderr + else: + raise TypeError( + "Expected process.stderr to be either bytes or str, " + f"got {type(process.stderr)}" + ) + + self._exit_status = process.returncode + + def __str__(self) -> str: + if self.exit_status == 0: + return self.stdout + return f'status {self.exit_status}, stdout: "{self.stdout}", stderr: "{self.stderr}"' + + @property + def stdout(self) -> str: + if not hasattr(self, "_stdout"): + self._stdout = self._stdout_bytes.decode("utf-8", errors="replace") + return self._stdout + + @property + def stdout_bytes(self) -> bytes: + if not hasattr(self, "_stdout_bytes"): + self._stdout_bytes = self._stdout.encode() + return self._stdout_bytes + + @property + def stderr(self) -> str: + if not hasattr(self, "_stderr"): + self._stderr = self._stderr_bytes.decode("utf-8", errors="replace") + return self._stderr + + @property + def exit_status(self) -> int: + return self._exit_status + + +class SSHError(signals.TestError): + """A SSH command returned with a non-zero status code.""" + + def __init__( + self, command: list[str], result: CalledProcessError, elapsed_sec: float + ): + if result.returncode < 0: + try: + reason = f"died with {signal.Signals(-result.returncode)}" + except ValueError: + reason = f"died with unknown signal {-result.returncode}" + else: + reason = f"unexpectedly returned {result.returncode}" + + super().__init__( + f'SSH command "{" ".join(command)}" {reason} after {elapsed_sec:.2f}s\n' + f'stderr: {result.stderr.decode("utf-8", errors="replace")}\n' + f'stdout: {result.stdout.decode("utf-8", errors="replace")}\n' + ) + self.result = result + + +@dataclass 
+class SSHConfig: + """SSH client config.""" + + # SSH flags. See ssh(1) for full details. + user: str + host_name: str + identity_file: str + + ssh_binary: str = "ssh" + config_file: str = "/dev/null" + port: int = 22 + + # + # SSH options. See ssh_config(5) for full details. + # + connect_timeout: int = DEFAULT_SSH_CONNECT_TIMEOUT_SEC + server_alive_interval: int = DEFAULT_SSH_SERVER_ALIVE_INTERVAL + strict_host_key_checking: bool = False + user_known_hosts_file: str = "/dev/null" + log_level: str = "ERROR" + + # Force allocation of a pseudo-tty. This can be used to execute arbitrary + # screen-based programs on a remote machine, which can be very useful, e.g. + # when implementing menu services. + force_tty: bool = False + + def full_command(self, command: list[str]) -> list[str]: + """Generate the complete command to execute command over SSH. + + Args: + command: The command to run over SSH + force_tty: Force pseudo-terminal allocation. This can be used to + execute arbitrary screen-based programs on a remote machine, + which can be very useful, e.g. when implementing menu services. + + Returns: + Arguments composing the complete call to SSH. + """ + return [ + self.ssh_binary, + # SSH flags + "-i", + self.identity_file, + "-F", + self.config_file, + "-p", + str(self.port), + # SSH configuration options + "-o", + f"ConnectTimeout={self.connect_timeout}", + "-o", + f"ServerAliveInterval={self.server_alive_interval}", + "-o", + f'StrictHostKeyChecking={"yes" if self.strict_host_key_checking else "no"}', + "-o", + f"UserKnownHostsFile={self.user_known_hosts_file}", + "-o", + f"LogLevel={self.log_level}", + "-o", + f'RequestTTY={"force" if self.force_tty else "auto"}', + f"{self.user}@{self.host_name}", + ] + command + + @staticmethod + def from_config(config: Mapping[str, Json]) -> "SSHConfig": + c = MapValidator(config) + ssh_binary_path = c.get(str, "ssh_binary_path", None) + if ssh_binary_path is None: + found_path = shutil.which("ssh") + if not isinstance(found_path, str): + raise ValueError("Failed to find ssh in $PATH") + ssh_binary_path = found_path + + return SSHConfig( + user=c.get(str, "user"), + host_name=c.get(str, "host"), + identity_file=c.get(str, "identity_file"), + ssh_binary=ssh_binary_path, + config_file=c.get(str, "ssh_config", "/dev/null"), + port=c.get(int, "port", 22), + connect_timeout=c.get(int, "connect_timeout", 30), + ) + + +class SSHProvider(Runner): + """Device-specific provider for SSH clients.""" + + def __init__(self, config: SSHConfig) -> None: + """ + Args: + config: SSH client config + """ + logger_tag = f"ssh | {config.host_name}" + if config.port != DEFAULT_SSH_PORT: + logger_tag += f":{config.port}" + + # Escape IPv6 interface identifier if present. + logger_tag = logger_tag.replace("%", "%%") + + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[{logger_tag}]", + }, + ) + + self.config = config + + try: + self.wait_until_reachable() + self.log.info("sshd is reachable") + except Exception as e: + raise TimeoutError("sshd is unreachable") from e + + def wait_until_reachable(self) -> None: + """Wait for the device to become reachable via SSH. 
+ + Raises: + TimeoutError: connect_timeout has expired without a successful SSH + connection to the device + CalledProcessTransportError: SSH is available on the device but + connect_timeout has expired and SSH fails to run + subprocess.TimeoutExpired: when the timeout expires while waiting + for a child process + """ + timeout_sec = self.config.connect_timeout + timeout = time.time() + timeout_sec + wait_for_port(self.config.host_name, self.config.port, timeout_sec=timeout_sec) + + while True: + try: + self._run( + ["echo"], stdin=None, timeout_sec=timeout_sec, log_output=True + ) + return + except CalledProcessTransportError as e: + # Repeat if necessary; _run() can exit prematurely by receiving + # SSH transport errors. These errors can be caused by sshd not + # being fully initialized yet. + if time.time() < timeout: + continue + else: + raise e + + def wait_until_unreachable( + self, + interval_sec: int = 1, + timeout_sec: int = DEFAULT_SSH_CONNECT_TIMEOUT_SEC, + ) -> None: + """Wait for the device to become unreachable via SSH. + + Args: + interval_sec: Seconds to wait between unreachability attempts + timeout_sec: Seconds to wait until raising TimeoutError + + Raises: + TimeoutError: when timeout_sec has expired and the device is still + reachable via SSH + """ + timeout = time.time() + timeout_sec + + while True: + try: + wait_for_port( + self.config.host_name, + self.config.port, + timeout_sec=interval_sec, + ) + except TimeoutError: + return + + if time.time() > timeout: + raise TimeoutError( + f"Connection to {self.config.host_name} is still reachable " + f"after {timeout_sec}s" + ) + + def run( + self, + command: str | list[str], + stdin: bytes | None = None, + timeout_sec: float | None = DEFAULT_SSH_TIMEOUT_SEC, + log_output: bool = True, + connect_retries: int = 3, + ) -> subprocess.CompletedProcess[bytes]: + """Run a command on the device then exit. + + Args: + command: String to send to the device. + stdin: Standard input to command. + timeout_sec: Seconds to wait for the command to complete. + connect_retries: Amount of times to retry connect on fail. + + Raises: + subprocess.CalledProcessError: when the process exits with a non-zero status + subprocess.TimeoutExpired: when the timeout expires while waiting + for a child process + CalledProcessTransportError: when the underlying transport fails + + Returns: + The completed process from the executed command.
+ """ + if isinstance(command, str): + s = shlex.shlex(command, posix=True, punctuation_chars=True) + s.whitespace_split = True + command = list(s) + return self._run_with_retry( + command, stdin, timeout_sec, log_output, connect_retries + ) + + def _run_with_retry( + self, + command: list[str], + stdin: bytes | None, + timeout_sec: float | None, + log_output: bool, + connect_retries: int, + ) -> subprocess.CompletedProcess[bytes]: + err: Exception = ValueError("connect_retries cannot be 0") + for _ in range(0, connect_retries): + try: + return self._run(command, stdin, timeout_sec, log_output) + except CalledProcessTransportError as e: + err = e + self.log.warning("Connect failed: %s", e) + raise err + + def _run( + self, + command: list[str], + stdin: bytes | None, + timeout_sec: float | None, + log_output: bool, + ) -> subprocess.CompletedProcess[bytes]: + start = time.perf_counter() + with self.start(command) as process: + try: + stdout, stderr = process.communicate(stdin, timeout_sec) + except subprocess.TimeoutExpired as e: + process.kill() + process.wait() + raise e + except: # Including KeyboardInterrupt, communicate handled that. + process.kill() + # We don't call process.wait() as Popen.__exit__ does that for + # us. + raise + + elapsed = time.perf_counter() - start + exit_code = process.poll() + + if log_output: + self.log.debug( + "Command %s exited with %d after %.2fs\nstdout: %s\nstderr: %s", + " ".join(command), + exit_code, + elapsed, + stdout.decode("utf-8", errors="replace"), + stderr.decode("utf-8", errors="replace"), + ) + else: + self.log.debug( + "Command %s exited with %d after %.2fs", + " ".join(command), + exit_code, + elapsed, + ) + + if exit_code is None: + raise ValueError( + f'Expected process to be terminated: "{" ".join(command)}"' + ) + + if exit_code: + err = CalledProcessError( + exit_code, process.args, output=stdout, stderr=stderr + ) + + if err.returncode == 255: + reason = stderr.decode("utf-8", errors="replace") + if ( + "Name or service not known" in reason + or "Host does not exist" in reason + ): + raise CalledProcessTransportError( + f"Hostname {self.config.host_name} cannot be resolved to an address" + ) from err + if "Connection timed out" in reason: + raise CalledProcessTransportError( + f"Failed to establish a connection to {self.config.host_name} within {timeout_sec}s" + ) from err + if "Connection refused" in reason: + raise CalledProcessTransportError( + f"Connection refused by {self.config.host_name}" + ) from err + + raise err + + return subprocess.CompletedProcess(process.args, exit_code, stdout, stderr) + + def run_async(self, command: str) -> subprocess.CompletedProcess[bytes]: + s = shlex.shlex(command, posix=True, punctuation_chars=True) + s.whitespace_split = True + command_split = list(s) + + process = self.start(command_split) + return subprocess.CompletedProcess( + self.config.full_command(command_split), + returncode=0, + stdout=str(process.pid).encode("utf-8"), + stderr=None, + ) + + def start( + self, + command: list[str], + stdout: IO[bytes] | int = subprocess.PIPE, + stdin: IO[bytes] | int = subprocess.PIPE, + ) -> subprocess.Popen[bytes]: + full_command = self.config.full_command(command) + self.log.debug( + f"Starting: {' '.join(command)}\nFull command: {' '.join(full_command)}" + ) + return subprocess.Popen( + full_command, + stdin=stdin, + stdout=stdout if stdout else subprocess.PIPE, + stderr=subprocess.PIPE, + preexec_fn=os.setpgrp, + )
diff --git a/src/antlion/context.py b/packages/antlion/context.py similarity index 90% rename from src/antlion/context.py rename to packages/antlion/context.py index cfe9df8..3f2481f 100644 --- a/src/antlion/context.py +++ b/packages/antlion/context.py
@@ -19,13 +19,14 @@ import os from antlion.event import event_bus -from antlion.event.event import Event -from antlion.event.event import TestCaseBeginEvent -from antlion.event.event import TestCaseEndEvent -from antlion.event.event import TestCaseEvent -from antlion.event.event import TestClassBeginEvent -from antlion.event.event import TestClassEndEvent -from antlion.event.event import TestClassEvent +from antlion.event.event import ( + Event, + TestCaseBeginEvent, + TestCaseEndEvent, + TestClassBeginEvent, + TestClassEndEvent, + TestClassEvent, +) class ContextLevel(enum.IntEnum): @@ -51,25 +52,6 @@ return _contexts[min(depth, len(_contexts) - 1)] -def get_context_for_event(event): - """Creates and returns a TestContext from the given event. - A TestClassContext is created for a TestClassEvent, and a TestCaseContext - is created for a TestCaseEvent. - - Args: - event: An instance of TestCaseEvent or TestClassEvent. - - Returns: An instance of TestContext corresponding to the event. - - Raises: TypeError if event is neither a TestCaseEvent nor TestClassEvent - """ - if isinstance(event, TestCaseEvent): - return _get_context_for_test_case_event(event) - if isinstance(event, TestClassEvent): - return _get_context_for_test_class_event(event) - raise TypeError("Unrecognized event type: %s %s", event, event.__class__) - - def _get_context_for_test_case_event(event): """Generate a TestCaseContext from the given TestCaseEvent.""" return TestCaseContext(event.test_class, event.test_case) @@ -339,7 +321,7 @@ @property def identifier(self): - return "%s.%s" % (self.test_class_name, self.test_case_name) + return f"{self.test_class_name}.{self.test_case_name}" def _get_default_context_dir(self): """Gets the default output directory for this context.
diff --git a/src/antlion/controllers/OWNERS b/packages/antlion/controllers/OWNERS similarity index 100% rename from src/antlion/controllers/OWNERS rename to packages/antlion/controllers/OWNERS
diff --git a/packages/antlion/controllers/__init__.py b/packages/antlion/controllers/__init__.py new file mode 100644 index 0000000..6d1ae5a --- /dev/null +++ b/packages/antlion/controllers/__init__.py
@@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import ( + access_point, + adb, + android_device, + attenuator, + fastboot, + fuchsia_device, + iperf_client, + iperf_server, + openwrt_ap, + packet_capture, + pdu, + sniffer, +) + +# Reexport so static type checkers can find these modules when importing and +# using antlion.controllers instead of "from antlion.controller import ..." +__all__ = [ + "access_point", + "adb", + "android_device", + "attenuator", + "fastboot", + "fuchsia_device", + "iperf_client", + "iperf_server", + "openwrt_ap", + "packet_capture", + "pdu", + "sniffer", +]
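A quick illustration of what the re-export buys, assuming antlion is installed: attribute access through `antlion.controllers` and an explicit submodule import resolve to the same module object, and both forms are visible to static type checkers.

```python
import antlion.controllers
from antlion.controllers import access_point

# The package __init__ imports each controller module, so plain attribute
# access works and mypy can follow it.
assert antlion.controllers.access_point is access_point
print(access_point.MOBLY_CONTROLLER_CONFIG_NAME)  # "AccessPoint"
```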
diff --git a/src/antlion/controllers/access_point.py b/packages/antlion/controllers/access_point.py similarity index 69% rename from src/antlion/controllers/access_point.py rename to packages/antlion/controllers/access_point.py index 91a241d..15e65b3 100755 --- a/src/antlion/controllers/access_point.py +++ b/packages/antlion/controllers/access_point.py
@@ -14,13 +14,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + import ipaddress +import logging +import os import time - from dataclasses import dataclass -from typing import Any, Dict, FrozenSet, List, Optional, Set, Tuple +from typing import Any, FrozenSet -from antlion import logger +from mobly import logger + from antlion import utils from antlion.capabilities.ssh import SSHConfig, SSHProvider from antlion.controllers.ap_lib import hostapd_constants @@ -40,13 +44,15 @@ BssTransitionManagementRequest, ) from antlion.controllers.pdu import PduDevice, get_pdu_port_for_device -from antlion.controllers.utils_lib.commands import ip -from antlion.controllers.utils_lib.commands import route -from antlion.controllers.utils_lib.ssh import connection -from antlion.controllers.utils_lib.ssh import settings -from antlion.libs.proc import job +from antlion.controllers.utils_lib.commands import command, ip, journalctl, route +from antlion.controllers.utils_lib.commands.date import LinuxDateCommand +from antlion.controllers.utils_lib.commands.tcpdump import LinuxTcpdumpCommand +from antlion.controllers.utils_lib.ssh import connection, settings +from antlion.runner import CalledProcessError +from antlion.types import ControllerConfig, Json +from antlion.validation import MapValidator -MOBLY_CONTROLLER_CONFIG_NAME = "AccessPoint" +MOBLY_CONTROLLER_CONFIG_NAME: str = "AccessPoint" ACTS_CONTROLLER_REFERENCE_NAME = "access_points" @@ -70,7 +76,45 @@ BRIDGE_IP_LAST = "100" -class AccessPoint(object): +def create(configs: list[ControllerConfig]) -> list[AccessPoint]: + """Creates ap controllers from a json config. + + Creates an ap controller from either a list, or a single + element. The element can either be just the hostname or a dictionary + containing the hostname and username of the ap to connect to over ssh. + + Args: + The json configs that represent this controller. + + Returns: + A new AccessPoint. + """ + return [AccessPoint(c) for c in configs] + + +def destroy(objects: list[AccessPoint]) -> None: + """Destroys a list of access points. + + Args: + aps: The list of access points to destroy. + """ + for ap in objects: + ap.close() + + +def get_info(objects: list[AccessPoint]) -> list[Json]: + """Get information on a list of access points. + + Args: + aps: A list of AccessPoints. + + Returns: + A list of all aps hostname. + """ + return [ap.ssh_settings.hostname for ap in objects] + + +class AccessPoint: """An access point controller. Attributes: @@ -79,27 +123,28 @@ dhcp_settings: The dhcp server settings being used. """ - def __init__(self, configs: Dict[str, Any]) -> None: + def __init__(self, config: ControllerConfig) -> None: """ Args: configs: configs for the access point from config file. 
""" - self.ssh_settings = settings.from_config(configs["ssh_config"]) - self.log = logger.create_logger( - lambda msg: f"[Access Point|{self.ssh_settings.hostname}] {msg}" + c = MapValidator(config) + self.ssh_settings = settings.from_config(c.get(dict, "ssh_config")) + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[Access Point|{self.ssh_settings.hostname}]", + }, ) - self.device_pdu_config = configs.get("PduDevice", None) + self.device_pdu_config = c.get(dict, "PduDevice", None) self.identifier = self.ssh_settings.hostname - if "ap_subnet" in configs: - self._AP_2G_SUBNET_STR: str = configs["ap_subnet"]["2g"] - self._AP_5G_SUBNET_STR: str = configs["ap_subnet"]["5g"] - else: - self._AP_2G_SUBNET_STR = _AP_2GHZ_SUBNET_STR_DEFAULT - self._AP_5G_SUBNET_STR = _AP_5GHZ_SUBNET_STR_DEFAULT + subnet = MapValidator(c.get(dict, "ap_subnet", {})) + self._AP_2G_SUBNET_STR = subnet.get(str, "2g", _AP_2GHZ_SUBNET_STR_DEFAULT) + self._AP_5G_SUBNET_STR = subnet.get(str, "5g", _AP_5GHZ_SUBNET_STR_DEFAULT) - self._AP_2G_SUBNET = Subnet(ipaddress.ip_network(self._AP_2G_SUBNET_STR)) - self._AP_5G_SUBNET = Subnet(ipaddress.ip_network(self._AP_5G_SUBNET_STR)) + self._AP_2G_SUBNET = Subnet(ipaddress.IPv4Network(self._AP_2G_SUBNET_STR)) + self._AP_5G_SUBNET = Subnet(ipaddress.IPv4Network(self._AP_5G_SUBNET_STR)) self.ssh = connection.SshConnection(self.ssh_settings) @@ -116,21 +161,24 @@ ) # Singleton utilities for running various commands. - self._ip_cmd = ip.LinuxIpCommand(self.ssh) - self._route_cmd = route.LinuxRouteCommand(self.ssh) + self._ip_cmd = command.require(ip.LinuxIpCommand(self.ssh)) + self._route_cmd = command.require(route.LinuxRouteCommand(self.ssh)) + self._journalctl_cmd = command.require( + journalctl.LinuxJournalctlCommand(self.ssh) + ) # A map from network interface name to _ApInstance objects representing # the hostapd instance running against the interface. - self._aps: Dict[str, _ApInstance] = dict() - self._dhcp: Optional[DhcpServer] = None - self._dhcp_bss: Dict[Any, Subnet] = dict() - self._radvd: Optional[Radvd] = None + self._aps: dict[str, _ApInstance] = dict() + self._dhcp: DhcpServer | None = None + self._dhcp_bss: dict[str, Subnet] = dict() + self._radvd: Radvd | None = None self.bridge = BridgeInterface(self) self.iwconfig = ApIwconfig(self) # Check to see if wan_interface is specified in acts_config for tests # isolated from the internet and set this override. - self.interfaces = ApInterfaces(self, configs.get("wan_interface")) + self.interfaces = ApInterfaces(self, c.get(str, "wan_interface", None)) # Get needed interface names and initialize the unnecessary ones. self.wan = self.interfaces.get_wan_interface() @@ -141,6 +189,13 @@ self._initial_ap() self.setup_bridge = False + # Allow use of tcpdump + self.tcpdump = LinuxTcpdumpCommand(self.ssh_provider) + + # Access points are not given internet access, so their system time needs to be + # manually set to be accurate. + LinuxDateCommand(self.ssh_provider).sync() + def _initial_ap(self) -> None: """Initial AP interfaces. @@ -153,11 +208,11 @@ # process, otherwise test would fail. 
try: self.ssh.run("stop wpasupplicant") - except job.Error: + except CalledProcessError: self.log.info("No wpasupplicant running") try: self.ssh.run("stop hostapd") - except job.Error: + except CalledProcessError: self.log.info("No hostapd running") # Bring down all wireless interfaces for iface in self.wlan: @@ -165,21 +220,20 @@ self.ssh.run(WLAN_DOWN) # Bring down all bridge interfaces bridge_interfaces = self.interfaces.get_bridge_interface() - if bridge_interfaces: - for iface in bridge_interfaces: - BRIDGE_DOWN = f"ip link set {iface} down" - BRIDGE_DEL = f"brctl delbr {iface}" - self.ssh.run(BRIDGE_DOWN) - self.ssh.run(BRIDGE_DEL) + for iface in bridge_interfaces: + BRIDGE_DOWN = f"ip link set {iface} down" + BRIDGE_DEL = f"brctl delbr {iface}" + self.ssh.run(BRIDGE_DOWN) + self.ssh.run(BRIDGE_DEL) def start_ap( self, hostapd_config: HostapdConfig, - radvd_config: RadvdConfig = None, + radvd_config: RadvdConfig | None = None, setup_bridge: bool = False, is_nat_enabled: bool = True, - additional_parameters: Dict[str, Any] = None, - ) -> List[Any]: + additional_parameters: dict[str, Any] | None = None, + ) -> list[str]: """Starts as an ap using a set of configurations. This will start an ap on this host. To start an ap the controller @@ -208,6 +262,9 @@ Raises: Error: When the ap can't be brought up. """ + if additional_parameters is None: + additional_parameters = {} + if hostapd_config.frequency < 5000: interface = self.wlan_2g subnet = self._AP_2G_SUBNET @@ -229,20 +286,35 @@ # of the wireless interface needs to have enough space to mask out # up to 8 different mac addresses. So in for one interface the range is # hex 0-7 and for the other the range is hex 8-f. - interface_mac_orig = None - cmd = f"ip link show {interface}|grep ether|awk -F' ' '{{print $2}}'" - interface_mac_orig = self.ssh.run(cmd) + ip = self.ssh.run(["ip", "link", "show", interface]) + + # Example output: + # 5: wlan0: <BROADCAST,MULTICAST> mtu 1500 qdisc mq state DOWN mode DEFAULT group default qlen 1000 + # link/ether f4:f2:6d:aa:99:28 brd ff:ff:ff:ff:ff:ff + + lines = ip.stdout.decode("utf-8").splitlines() + if len(lines) != 2: + raise RuntimeError(f"Expected 2 lines from ip link show, got {len(lines)}") + tokens = lines[1].split() + if len(tokens) != 4: + raise RuntimeError( + f"Expected 4 tokens from ip link show, got {len(tokens)}" + ) + interface_mac_orig = tokens[1] + if interface == self.wlan_5g: - hostapd_config.bssid = interface_mac_orig.stdout[:-1] + "0" + hostapd_config.bssid = f"{interface_mac_orig[:-1]}0" last_octet = 1 - if interface == self.wlan_2g: - hostapd_config.bssid = interface_mac_orig.stdout[:-1] + "8" + elif interface == self.wlan_2g: + hostapd_config.bssid = f"{interface_mac_orig[:-1]}8" last_octet = 9 - if interface in self._aps: + elif interface in self._aps: raise ValueError( "No WiFi interface available for AP on " f"channel {hostapd_config.channel}" ) + else: + raise ValueError(f"Invalid WLAN interface: {interface}") apd = Hostapd(self.ssh, interface) new_instance = _ApInstance(hostapd=apd, subnet=subnet) @@ -257,7 +329,7 @@ # on the AP, but not for traffic handled by the Linux networking stack # such as ping. if radvd_config: - self._route_cmd.add_route(interface, "fe80::/64") + self._route_cmd.add_route(interface, ipaddress.IPv6Interface("fe80::/64")) self._dhcp_bss = dict() if hostapd_config.bss_lookup: @@ -270,19 +342,18 @@ # hostapd interfaces and not the DHCP servers for each # interface. 
counter = 1 - for bss in hostapd_config.bss_lookup: - if interface_mac_orig: - hostapd_config.bss_lookup[bss].bssid = ( - interface_mac_orig.stdout[:-1] + hex(last_octet)[-1:] - ) - self._route_cmd.clear_routes(net_interface=str(bss)) + for iface in hostapd_config.bss_lookup: + hostapd_config.bss_lookup[iface].bssid = ( + interface_mac_orig[:-1] + hex(last_octet)[-1:] + ) + self._route_cmd.clear_routes(net_interface=str(iface)) if interface is self.wlan_2g: starting_ip_range = self._AP_2G_SUBNET_STR else: starting_ip_range = self._AP_5G_SUBNET_STR a, b, c, d = starting_ip_range.split(".") - self._dhcp_bss[bss] = Subnet( - ipaddress.ip_network(f"{a}.{b}.{int(c) + counter}.{d}") + self._dhcp_bss[iface] = Subnet( + ipaddress.IPv4Network(f"{a}.{b}.{int(c) + counter}.{d}") ) counter = counter + 1 last_octet = last_octet + 1 @@ -291,12 +362,15 @@ # The DHCP serer requires interfaces to have ips and routes before # the server will come up. - interface_ip = ipaddress.ip_interface( - f"{subnet.router}/{subnet.network.netmask}" + interface_ip = ipaddress.IPv4Interface( + f"{subnet.router}/{subnet.network.prefixlen}" ) if setup_bridge is True: bridge_interface_name = "eth_test" - self.create_bridge(bridge_interface_name, [interface, self.lan]) + interfaces = [interface] + if self.lan: + interfaces.append(self.lan) + self.create_bridge(bridge_interface_name, interfaces) self._ip_cmd.set_ipv4_address(bridge_interface_name, interface_ip) else: self._ip_cmd.set_ipv4_address(interface, interface_ip) @@ -305,11 +379,11 @@ # hostapd and assigns the DHCP scopes that were defined but # not used during the hostapd loop above. The k and v # variables represent the interface name, k, and dhcp info, v. - for k, v in self._dhcp_bss.items(): - bss_interface_ip = ipaddress.ip_interface( - f"{self._dhcp_bss[k].router}/{self._dhcp_bss[k].network.netmask}" + for iface, subnet in self._dhcp_bss.items(): + bss_interface_ip = ipaddress.IPv4Interface( + f"{subnet.router}/{subnet.network.prefixlen}" ) - self._ip_cmd.set_ipv4_address(str(k), bss_interface_ip) + self._ip_cmd.set_ipv4_address(iface, bss_interface_ip) # Restart the DHCP server with our updated list of subnets. configured_subnets = self.get_configured_subnets() @@ -333,7 +407,7 @@ return bss_interfaces - def get_configured_subnets(self) -> List[Subnet]: + def get_configured_subnets(self) -> list[Subnet]: """Get the list of configured subnets on the access point. This allows consumers of the access point objects create custom DHCP @@ -357,16 +431,22 @@ Raises: Error: Raised when a dhcp server error is found. """ - self._dhcp.start(config=dhcp_conf) + if self._dhcp is not None: + self._dhcp.start(config=dhcp_conf) def stop_dhcp(self) -> None: """Stop DHCP for this AP object. This allows consumers of the access point objects to control DHCP. """ - self._dhcp.stop() + if self._dhcp is not None: + self._dhcp.stop() - def get_dhcp_logs(self) -> Optional[str]: + def get_systemd_journal(self) -> str: + """Get systemd journal logs from this current boot.""" + return self._journalctl_cmd.logs() + + def get_dhcp_logs(self) -> str | None: """Get DHCP logs for this AP object. This allows consumers of the access point objects to validate DHCP @@ -376,11 +456,11 @@ A string of the dhcp server logs, or None is a DHCP server has not been started. 
""" - if self._dhcp: + if self._dhcp is not None: return self._dhcp.get_logs() return None - def get_hostapd_logs(self) -> Dict[str, str]: + def get_hostapd_logs(self) -> dict[str, str]: """Get hostapd logs for all interfaces on AP object. This allows consumers of the access point objects to validate hostapd @@ -388,12 +468,12 @@ Returns: A dict with {interface: log} from hostapd instances. """ - hostapd_logs = dict() - for identifier in self._aps: - hostapd_logs[identifier] = self._aps.get(identifier).hostapd.pull_logs() + hostapd_logs: dict[str, str] = dict() + for iface, ap in self._aps.items(): + hostapd_logs[iface] = ap.hostapd.pull_logs() return hostapd_logs - def get_radvd_logs(self) -> Optional[str]: + def get_radvd_logs(self) -> str | None: """Get radvd logs for this AP object. This allows consumers of the access point objects to validate radvd @@ -407,6 +487,43 @@ return self._radvd.pull_logs() return None + def download_ap_logs(self, path: str) -> None: + """Download all available logs from the AP. + + Args: + path: Path to write logs to. + + This convenience method gets all the logs, dhcp, hostapd, radvd. It + writes these to the given path. + """ + dhcp_log = self.get_dhcp_logs() + if dhcp_log: + dhcp_log_path = os.path.join(path, f"{self.identifier}_dhcp_log.txt") + with open(dhcp_log_path, "a") as f: + f.write(dhcp_log) + + hostapd_logs = self.get_hostapd_logs() + for interface in hostapd_logs: + hostapd_log_path = os.path.join( + path, + f"{self.identifier}_hostapd_log_{interface}.txt", + ) + with open(hostapd_log_path, "a") as f: + f.write(hostapd_logs[interface]) + + radvd_log = self.get_radvd_logs() + if radvd_log: + radvd_log_path = os.path.join(path, f"{self.identifier}_radvd_log.txt") + with open(radvd_log_path, "a") as f: + f.write(radvd_log) + + systemd_journal = self.get_systemd_journal() + systemd_journal_path = os.path.join( + path, f"{self.identifier}_systemd_journal.txt" + ) + with open(systemd_journal_path, "a") as f: + f.write(systemd_journal) + def enable_forwarding(self) -> None: """Enable IPv4 and IPv6 forwarding on the AP. @@ -443,7 +560,7 @@ """ self.ssh.run("iptables -t nat -F") - def create_bridge(self, bridge_name: str, interfaces: List[str]) -> None: + def create_bridge(self, bridge_name: str, interfaces: list[str]) -> None: """Create the specified bridge and bridge the specified interfaces. Args: @@ -475,11 +592,11 @@ # If the bridge exists, we'll get an exit_status of 0, indicating # success, so we can continue and remove the bridge. - if result.exit_status == 0: + if result.returncode == 0: self.ssh.run(f"ip link set {bridge_name} down") self.ssh.run(f"brctl delbr {bridge_name}") - def get_bssid_from_ssid(self, ssid: str, band: str) -> Optional[str]: + def get_bssid_from_ssid(self, ssid: str, band: str) -> str | None: """Gets the BSSID from a provided SSID Args: @@ -494,19 +611,29 @@ # Get the interface name associated with the given ssid. for interface in interfaces: - iw_output = self.ssh.run( - f"iw dev {interface} info|grep ssid|awk -F' ' '{{print $2}}'" - ) - if "command failed: No such device" in iw_output.stderr: + iw = self.ssh.run(["iw", "dev", interface, "info"]) + if b"command failed: No such device" in iw.stderr: continue - else: - # If the configured ssid is equal to the given ssid, we found - # the right interface. 
- if iw_output.stdout == ssid: - iw_output = self.ssh.run( - f"iw dev {interface} info|grep addr|awk -F' ' '{{print $2}}'" + + iw_lines = iw.stdout.decode("utf-8").splitlines() + + for line in iw_lines: + if "ssid" in line and ssid in line: + # We found the right interface. + for line in iw_lines: + if "addr" in line: + tokens = line.split() + if len(tokens) != 2: + raise RuntimeError( + f"Expected iw dev info addr to have 2 tokens, got {tokens}" + ) + return tokens[2] + + iw_out = "\n".join(iw_lines) + raise RuntimeError( + f"iw dev info contained ssid but not addr: \n{iw_out}" ) - return iw_output.stdout + return None def stop_ap(self, identifier: str) -> None: @@ -516,10 +643,9 @@ identifier: The identify of the ap that should be taken down. """ - if identifier not in list(self._aps.keys()): - raise ValueError(f"Invalid identifier {identifier} given") - instance = self._aps.get(identifier) + if instance is None: + raise ValueError(f"Invalid identifier {identifier} given") if self._radvd: self._radvd.stop() @@ -533,12 +659,11 @@ del self._aps[identifier] bridge_interfaces = self.interfaces.get_bridge_interface() - if bridge_interfaces: - for iface in bridge_interfaces: - BRIDGE_DOWN = f"ip link set {iface} down" - BRIDGE_DEL = f"brctl delbr {iface}" - self.ssh.run(BRIDGE_DOWN) - self.ssh.run(BRIDGE_DEL) + for iface in bridge_interfaces: + BRIDGE_DOWN = f"ip link set {iface} down" + BRIDGE_DEL = f"brctl delbr {iface}" + self.ssh.run(BRIDGE_DOWN) + self.ssh.run(BRIDGE_DEL) def stop_all_aps(self) -> None: """Stops all running aps on this device.""" @@ -557,7 +682,7 @@ self.stop_all_aps() self.ssh.close() - def generate_bridge_configs(self, channel: int) -> Tuple[str, Optional[str], str]: + def generate_bridge_configs(self, channel: int) -> tuple[str, str | None, str]: """Generate a list of configs for a bridge between LAN and WLAN. Args: @@ -588,8 +713,8 @@ interval: int = 1000, timeout: int = 1000, size: int = 56, - additional_ping_params: Optional[Any] = None, - ) -> Dict[str, Any]: + additional_ping_params: str = "", + ) -> utils.PingResult: """Pings from AP to dest_ip, returns dict of ping stats (see utils.ping)""" return utils.ping( self.ssh, @@ -601,43 +726,15 @@ additional_ping_params=additional_ping_params, ) - def can_ping( - self, - dest_ip: str, - count: int = 1, - interval: int = 1000, - timeout: int = 1000, - size: int = 56, - additional_ping_params: Optional[Any] = None, - ) -> bool: - """Returns whether ap can ping dest_ip (see utils.can_ping)""" - return utils.can_ping( - self.ssh, - dest_ip, - count=count, - interval=interval, - timeout=timeout, - size=size, - additional_ping_params=additional_ping_params, - ) - def hard_power_cycle( self, - pdus: List[PduDevice], - hostapd_configs: Optional[List[HostapdConfig]] = None, + pdus: list[PduDevice], ) -> None: """Kills, then restores power to AccessPoint, verifying it goes down and comes back online cleanly. Args: pdus: PDUs in the testbed - hostapd_configs: Hostapd settings. If present, these networks will - be spun up after the AP has rebooted. This list can either - contain HostapdConfig objects, or dictionaries with the start_ap - params - (i.e { 'hostapd_config': <HostapdConfig>, - 'setup_bridge': <bool>, - 'additional_parameters': <dict> } ). Raise: Error, if no PduDevice is provided in AccessPoint config. ConnectionError, if AccessPoint fails to go offline or come back. 
@@ -645,14 +742,13 @@ if not self.device_pdu_config: raise Error("No PduDevice provided in AccessPoint config.") - if hostapd_configs is None: - hostapd_configs = [] + self._journalctl_cmd.save_and_reset() - self.log.info(f"Power cycling") + self.log.info("Power cycling") ap_pdu, ap_pdu_port = get_pdu_port_for_device(self.device_pdu_config, pdus) - self.log.info(f"Killing power") - ap_pdu.off(str(ap_pdu_port)) + self.log.info("Killing power") + ap_pdu.off(ap_pdu_port) self.log.info("Verifying AccessPoint is unreachable.") self.ssh_provider.wait_until_unreachable() @@ -660,8 +756,8 @@ self._aps.clear() - self.log.info(f"Restoring power") - ap_pdu.on(str(ap_pdu_port)) + self.log.info("Restoring power") + ap_pdu.on(ap_pdu_port) self.log.info("Waiting for AccessPoint to become available via SSH.") self.ssh_provider.wait_until_reachable() @@ -672,68 +768,69 @@ self._initial_ap() self.log.info("Power cycled successfully") - for settings in hostapd_configs: - if type(settings) == HostapdConfig: - config = settings - setup_bridge = False - additional_parameters = None - - elif type(settings) == dict: - config = settings["hostapd_config"] - setup_bridge = settings.get("setup_bridge", False) - additional_parameters = settings.get("additional_parameters", None) - else: - raise TypeError( - "Items in hostapd_configs list must either be " - "HostapdConfig objects or dictionaries." - ) - - self.log.info(f"Restarting network {config.ssid}") - self.start_ap( - config, - setup_bridge=setup_bridge, - additional_parameters=additional_parameters, - ) - def channel_switch(self, identifier: str, channel_num: int) -> None: """Switch to a different channel on the given AP.""" - if identifier not in list(self._aps.keys()): - raise ValueError(f"Invalid identifier {identifier} given") instance = self._aps.get(identifier) + if instance is None: + raise ValueError(f"Invalid identifier {identifier} given") self.log.info(f"channel switch to channel {channel_num}") instance.hostapd.channel_switch(channel_num) def get_current_channel(self, identifier: str) -> int: """Find the current channel on the given AP.""" - if identifier not in list(self._aps.keys()): - raise ValueError(f"Invalid identifier {identifier} given") instance = self._aps.get(identifier) + if instance is None: + raise ValueError(f"Invalid identifier {identifier} given") return instance.hostapd.get_current_channel() - def get_stas(self, identifier: str) -> Set[str]: + def get_stas(self, identifier: str) -> set[str]: """Return MAC addresses of all associated STAs on the given AP.""" - if identifier not in list(self._aps.keys()): - raise ValueError(f"Invalid identifier {identifier} given") instance = self._aps.get(identifier) + if instance is None: + raise ValueError(f"Invalid identifier {identifier} given") return instance.hostapd.get_stas() + def sta_authenticated(self, identifier: str, sta_mac: str) -> bool: + """Is STA authenticated?""" + instance = self._aps.get(identifier) + if instance is None: + raise ValueError(f"Invalid identifier {identifier} given") + return instance.hostapd.sta_authenticated(sta_mac) + + def sta_associated(self, identifier: str, sta_mac: str) -> bool: + """Is STA associated?""" + instance = self._aps.get(identifier) + if instance is None: + raise ValueError(f"Invalid identifier {identifier} given") + return instance.hostapd.sta_associated(sta_mac) + + def sta_authorized(self, identifier: str, sta_mac: str) -> bool: + """Is STA authorized (802.1X controlled port open)?""" + instance = self._aps.get(identifier) + if instance is 
None: + raise ValueError(f"Invalid identifier {identifier} given") + return instance.hostapd.sta_authorized(sta_mac) + def get_sta_extended_capabilities( self, identifier: str, sta_mac: str ) -> ExtendedCapabilities: """Get extended capabilities for the given STA, as seen by the AP.""" - if identifier not in list(self._aps.keys()): - raise ValueError(f"Invalid identifier {identifier} given") instance = self._aps.get(identifier) + if instance is None: + raise ValueError(f"Invalid identifier {identifier} given") return instance.hostapd.get_sta_extended_capabilities(sta_mac) def send_bss_transition_management_req( - self, identifier: str, sta_mac: str, request: BssTransitionManagementRequest - ) -> job.Result: + self, + identifier: str, + sta_mac: str, + request: BssTransitionManagementRequest, + ) -> None: """Send a BSS Transition Management request to an associated STA.""" - if identifier not in list(self._aps.keys()): - raise ValueError("Invalid identifier {identifier} given") instance = self._aps.get(identifier) - return instance.hostapd.send_bss_transition_management_req(sta_mac, request) + if instance is None: + raise ValueError(f"Invalid identifier {identifier} given") + instance.hostapd.send_bss_transition_management_req(sta_mac, request) def setup_ap( @@ -741,26 +838,25 @@ profile_name: str, channel: int, ssid: str, - mode: Optional[str] = None, - preamble: Optional[bool] = None, - beacon_interval: Optional[int] = None, - dtim_period: Optional[int] = None, - frag_threshold: Optional[int] = None, - rts_threshold: Optional[int] = None, - force_wmm: Optional[bool] = None, - hidden: Optional[bool] = False, - security: Optional[Security] = None, - pmf_support: Optional[int] = None, - additional_ap_parameters: Optional[Dict[str, Any]] = None, - password: Optional[str] = None, - n_capabilities: Optional[List[Any]] = None, - ac_capabilities: Optional[List[Any]] = None, - vht_bandwidth: Optional[int] = None, + mode: str | None = None, + preamble: bool | None = None, + beacon_interval: int | None = None, + dtim_period: int | None = None, + frag_threshold: int | None = None, + rts_threshold: int | None = None, + force_wmm: bool | None = None, + hidden: bool | None = False, + security: Security | None = None, + pmf_support: int | None = None, + additional_ap_parameters: dict[str, Any] | None = None, + n_capabilities: list[Any] | None = None, + ac_capabilities: list[Any] | None = None, + vht_bandwidth: int | None = None, wnm_features: FrozenSet[hostapd_constants.WnmFeature] = frozenset(), setup_bridge: bool = False, is_ipv6_enabled: bool = False, is_nat_enabled: bool = True, -): +) -> list[str]: """Creates a hostapd profile and runs it on an ap. This is a convenience function that allows us to start an ap with a single function, without first creating a hostapd config. @@ -779,7 +875,6 @@ security: What security to enable. pmf_support: Whether pmf is not disabled, enabled, or required additional_ap_parameters: Additional parameters to send the AP. - password: Password to connect to WLAN if necessary. check_connectivity: Whether to check for internet connectivity. wnm_features: WNM features to enable on the AP. setup_bridge: Whether to bridge the LAN interface WLAN interface. @@ -796,6 +891,9 @@ Raises: Error: When the ap can't be brought up. 
""" + if additional_ap_parameters is None: + additional_ap_parameters = {} + ap = create_ap_preset( profile_name=profile_name, iface_wlan_2g=access_point.wlan_2g, @@ -825,41 +923,3 @@ is_nat_enabled=is_nat_enabled, additional_parameters=additional_ap_parameters, ) - - -def create(configs: Any) -> List[AccessPoint]: - """Creates ap controllers from a json config. - - Creates an ap controller from either a list, or a single - element. The element can either be just the hostname or a dictionary - containing the hostname and username of the ap to connect to over ssh. - - Args: - The json configs that represent this controller. - - Returns: - A new AccessPoint. - """ - return [AccessPoint(c) for c in configs] - - -def destroy(aps: List[AccessPoint]) -> None: - """Destroys a list of access points. - - Args: - aps: The list of access points to destroy. - """ - for ap in aps: - ap.close() - - -def get_info(aps: List[AccessPoint]) -> List[str]: - """Get information on a list of access points. - - Args: - aps: A list of AccessPoints. - - Returns: - A list of all aps hostname. - """ - return [ap.ssh_settings.hostname for ap in aps]
diff --git a/src/antlion/controllers/adb.py b/packages/antlion/controllers/adb.py
similarity index 93%
rename from src/antlion/controllers/adb.py
rename to packages/antlion/controllers/adb.py
index 5c3848d..61597ff 100644
--- a/src/antlion/controllers/adb.py
+++ b/packages/antlion/controllers/adb.py
@@ -19,8 +19,7 @@ import shlex import shutil -from antlion.controllers.adb_lib.error import AdbCommandError -from antlion.controllers.adb_lib.error import AdbError +from antlion.controllers.adb_lib.error import AdbCommandError, AdbError from antlion.libs.proc import job DEFAULT_ADB_TIMEOUT = 60 @@ -78,7 +77,7 @@ adb_path = shutil.which("adb") adb_cmd = [shlex.quote(adb_path)] if serial: - adb_cmd.append("-s %s" % serial) + adb_cmd.append(f"-s {serial}") if ssh_connection is not None: # Kill all existing adb processes on the remote host (if any) # Note that if there are none, then pkill exits with non-zero status @@ -97,7 +96,7 @@ self._server_local_port = local_port if self._server_local_port: - adb_cmd.append("-P %d" % local_port) + adb_cmd.append(f"-P {local_port}") self.adb_str = " ".join(adb_cmd) self._ssh_connection = ssh_connection @@ -159,7 +158,7 @@ """ if isinstance(cmd, list): cmd = " ".join(cmd) - result = job.run(cmd, ignore_status=True, timeout=timeout) + result = job.run(cmd, ignore_status=True, timeout_sec=timeout) ret, out, err = result.exit_status, result.stdout, result.stderr if any( @@ -181,7 +180,7 @@ return out def _exec_adb_cmd(self, name, arg_str, **kwargs): - return self._exec_cmd(" ".join((self.adb_str, name, arg_str)), **kwargs) + return self._exec_cmd(f"{self.adb_str} {name} {arg_str}", **kwargs) def _exec_cmd_nb(self, cmd, **kwargs): """Executes adb commands in a new shell, non blocking. @@ -193,7 +192,7 @@ return job.run_async(cmd, **kwargs) def _exec_adb_cmd_nb(self, name, arg_str, **kwargs): - return self._exec_cmd_nb(" ".join((self.adb_str, name, arg_str)), **kwargs) + return self._exec_cmd_nb(f"{self.adb_str} {name} {arg_str}", **kwargs) def tcp_forward(self, host_port, device_port): """Starts tcp forwarding from localhost to this android device. @@ -214,9 +213,7 @@ host_port = self._ssh_connection.create_ssh_tunnel( remote_port, local_port=host_port ) - output = self.forward( - "tcp:%d tcp:%d" % (host_port, device_port), ignore_status=True - ) + output = self.forward(f"tcp:{host_port} tcp:{device_port}", ignore_status=True) # If hinted_port is 0, the output will be the selected port. # Otherwise, there will be no output upon successfully # forwarding the hinted port. @@ -243,7 +240,7 @@ return # The actual port we need to disable via adb is on the remote host. host_port = remote_port - self.forward("--remove tcp:%d" % host_port) + self.forward(f"--remove tcp:{host_port}") def getprop(self, prop_name): """Get a property of the device. @@ -257,7 +254,7 @@ A string that is the value of the property, or None if the property doesn't exist. """ - return self.shell("getprop %s" % prop_name) + return self.shell(f"getprop {prop_name}") # TODO: This should be abstracted out into an object like the other shell # command.
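The adb.py changes above move the command assembly to plain f-strings ("-s {serial}", "-P {local_port}", and the joined adb_str prefix). A standalone sketch of the same assembly, assuming Python 3.10+ and adb on PATH; build_adb_str is a made-up name, and AdbProxy itself may instead route the joined command over an SSH connection.

import shlex
import shutil
import subprocess

def build_adb_str(serial: str | None = None, server_port: int | None = None) -> str:
    """Assembles an adb command prefix the way AdbProxy builds self.adb_str."""
    adb_path = shutil.which("adb")
    if adb_path is None:
        raise FileNotFoundError("adb not found on PATH")
    adb_cmd = [shlex.quote(adb_path)]
    if serial:
        adb_cmd.append(f"-s {serial}")        # target a specific device, mirroring the f-string form
    if server_port:
        adb_cmd.append(f"-P {server_port}")   # talk to a non-default adb server port
    return " ".join(adb_cmd)

# Example: the rough equivalent of _exec_adb_cmd("devices", "") run through a shell.
out = subprocess.run(f"{build_adb_str()} devices", shell=True, capture_output=True, text=True)
print(out.stdout)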
diff --git a/src/antlion/controllers/adb_lib/__init__.py b/packages/antlion/controllers/adb_lib/__init__.py
similarity index 100%
rename from src/antlion/controllers/adb_lib/__init__.py
rename to packages/antlion/controllers/adb_lib/__init__.py
diff --git a/src/antlion/controllers/adb_lib/error.py b/packages/antlion/controllers/adb_lib/error.py
similarity index 100%
rename from src/antlion/controllers/adb_lib/error.py
rename to packages/antlion/controllers/adb_lib/error.py
diff --git a/src/antlion/controllers/android_device.py b/packages/antlion/controllers/android_device.py
similarity index 91%
rename from src/antlion/controllers/android_device.py
rename to packages/antlion/controllers/android_device.py
index 0eb0969..87c7b94 100755
--- a/src/antlion/controllers/android_device.py
+++ b/packages/antlion/controllers/android_device.py
@@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + import collections import logging import math @@ -26,22 +28,20 @@ from antlion import context from antlion import logger as acts_logger -from antlion import tracelogger from antlion import utils -from antlion.controllers import adb +from antlion.controllers import adb, fastboot from antlion.controllers.adb_lib.error import AdbError -from antlion.controllers import fastboot from antlion.controllers.android_lib import errors from antlion.controllers.android_lib import events as android_events -from antlion.controllers.android_lib import logcat -from antlion.controllers.android_lib import services +from antlion.controllers.android_lib import logcat, services from antlion.controllers.sl4a_lib import sl4a_manager -from antlion.controllers.utils_lib.ssh import connection -from antlion.controllers.utils_lib.ssh import settings +from antlion.controllers.utils_lib.ssh import connection, settings from antlion.event import event_bus from antlion.libs.proc import job +from antlion.runner import Runner +from antlion.types import ControllerConfig, Json -MOBLY_CONTROLLER_CONFIG_NAME = "AndroidDevice" +MOBLY_CONTROLLER_CONFIG_NAME: str = "AndroidDevice" ACTS_CONTROLLER_REFERENCE_NAME = "android_devices" ANDROID_DEVICE_PICK_ALL_TOKEN = "*" @@ -84,7 +84,7 @@ RELEASE_ID_REGEXES = [re.compile(r"\w+\.\d+\.\d+"), re.compile(r"N\w+")] -def create(configs): +def create(configs: list[ControllerConfig]) -> list[AndroidDevice]: """Creates AndroidDevice controller objects. Args: @@ -96,8 +96,6 @@ """ if not configs: raise errors.AndroidDeviceConfigError(ANDROID_DEVICE_EMPTY_CONFIG_MSG) - elif configs == ANDROID_DEVICE_PICK_ALL_TOKEN: - ads = get_all_instances() elif not isinstance(configs, list): raise errors.AndroidDeviceConfigError(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG) elif isinstance(configs[0], str): @@ -107,7 +105,7 @@ # Configs is a list of dicts. ads = get_instances_with_configs(configs) - ads[0].log.info('The primary device under test is "%s".' % ads[0].serial) + ads[0].log.info(f'The primary device under test is "{ads[0].serial}".') for ad in ads: if not ad.is_connected(): @@ -124,20 +122,20 @@ return ads -def destroy(ads): +def destroy(objects: list[AndroidDevice]) -> None: """Cleans up AndroidDevice objects. Args: ads: A list of AndroidDevice objects. """ - for ad in ads: + for ad in objects: try: ad.clean_up() except: ad.log.exception("Failed to clean up properly.") -def get_info(ads): +def get_info(objects: list[AndroidDevice]) -> list[Json]: """Get information on a list of AndroidDevice objects. Args: @@ -146,8 +144,8 @@ Returns: A list of dict, each representing info for an AndroidDevice objects. """ - device_info = [] - for ad in ads: + device_info: list[Json] = [] + for ad in objects: info = {"serial": ad.serial, "model": ad.model} info.update(ad.build_info) device_info.append(info) @@ -210,7 +208,7 @@ return _parse_device_list(out, "fastboot") -def get_instances(serials): +def get_instances(serials) -> list[AndroidDevice]: """Create AndroidDevice instances from a list of serials. Args: @@ -219,7 +217,7 @@ Returns: A list of AndroidDevice objects. """ - results = [] + results: list[AndroidDevice] = [] for s in serials: results.append(AndroidDevice(s)) return results @@ -243,7 +241,7 @@ serial = c.pop("serial") except KeyError: raise errors.AndroidDeviceConfigError( - "Required value 'serial' is missing in AndroidDevice config %s." 
% c + f"Required value 'serial' is missing in AndroidDevice config {c}." ) client_port = 0 if ANDROID_DEVICE_SL4A_CLIENT_PORT_KEY in c: @@ -289,7 +287,7 @@ return results -def get_all_instances(include_fastboot=False): +def get_all_instances(include_fastboot: bool = False) -> list[AndroidDevice]: """Create AndroidDevice instances for all attached android devices. Args: @@ -355,13 +353,13 @@ filtered = filter_devices(ads, _get_device_filter) if not filtered: raise ValueError( - "Could not find a target device that matches condition: %s." % kwargs + f"Could not find a target device that matches condition: {kwargs}." ) elif len(filtered) == 1: return filtered[0] else: serials = [ad.serial for ad in filtered] - raise ValueError("More than one device matched: %s" % serials) + raise ValueError(f"More than one device matched: {serials}") def take_bug_reports(ads, test_name, begin_time): @@ -412,23 +410,21 @@ def __init__( self, - serial="", - ssh_connection=None, - client_port=0, - forwarded_port=0, - server_port=None, + serial: str = "", + ssh_connection: Runner | None = None, + client_port: int = 0, + forwarded_port: int = 0, + server_port: int | None = None, ): self.serial = serial # logging.log_path only exists when this is used in an ACTS test run. log_path_base = getattr(logging, "log_path", "/tmp/logs") - self.log_dir = "AndroidDevice%s" % serial + self.log_dir = f"AndroidDevice{serial}" self.log_path = os.path.join(log_path_base, self.log_dir) self.client_port = client_port self.forwarded_port = forwarded_port self.server_port = server_port - self.log = tracelogger.TraceLogger( - AndroidDeviceLoggerAdapter(logging.getLogger(), {"serial": serial}) - ) + self.log = AndroidDeviceLoggerAdapter(logging.getLogger(), {"serial": serial}) self._event_dispatchers = {} self._services = [] self.register_service(services.AdbLogcatService(self)) @@ -694,7 +690,7 @@ # skip_sl4a value can be reset from config file if hasattr(self, k) and k != "skip_sl4a": raise errors.AndroidDeviceError( - "Attempting to set existing attribute %s on %s" % (k, self.serial), + f"Attempting to set existing attribute {k} on {self.serial}", serial=self.serial, ) setattr(self, k, v) @@ -710,7 +706,7 @@ for attempt in range(ADB_ROOT_RETRY_COUNT): try: - self.log.debug("Enabling ADB root mode: attempt %d." % attempt) + self.log.debug(f"Enabling ADB root mode: attempt {attempt}.") self.adb.root() except AdbError: if attempt == ADB_ROOT_RETRY_COUNT: @@ -774,7 +770,7 @@ for cmd in ("ps -A", "ps"): try: out = self.adb.shell( - '%s | grep "S %s"' % (cmd, package_name), ignore_status=True + f'{cmd} | grep "S {package_name}"', ignore_status=True ) if package_name not in out: continue @@ -834,10 +830,10 @@ log_end_time = acts_logger.epoch_to_log_line_timestamp(end_time) self.log.debug("Extracting adb log from logcat.") logcat_path = os.path.join( - self.device_log_path, "adblog_%s_debug.txt" % self.serial + self.device_log_path, f"adblog_{self.serial}_debug.txt" ) if not os.path.exists(logcat_path): - self.log.warning("Logcat file %s does not exist." 
% logcat_path) + self.log.warning(f"Logcat file {logcat_path} does not exist.") return adb_excerpt_dir = os.path.join(self.log_path, dest_path) os.makedirs(adb_excerpt_dir, exist_ok=True) @@ -846,7 +842,7 @@ self.serial, ) tag_len = utils.MAX_FILENAME_LEN - len(out_name) - out_name = "%s,%s" % (tag[:tag_len], out_name) + out_name = f"{tag[:tag_len]},{out_name}" adb_excerpt_path = os.path.join(adb_excerpt_dir, out_name) with open(adb_excerpt_path, "w", encoding="utf-8") as out: in_file = logcat_path @@ -902,14 +898,12 @@ """ if not logcat_path: logcat_path = os.path.join( - self.device_log_path, "adblog_%s_debug.txt" % self.serial + self.device_log_path, f"adblog_{self.serial}_debug.txt" ) if not os.path.exists(logcat_path): - self.log.warning("Logcat file %s does not exist." % logcat_path) + self.log.warning(f"Logcat file {logcat_path} does not exist.") return - output = job.run( - "grep '%s' %s" % (matching_string, logcat_path), ignore_status=True - ) + output = job.run(f"grep '{matching_string}' {logcat_path}", ignore_status=True) if not output.stdout or output.exit_status != 0: return [] if begin_time: @@ -976,7 +970,7 @@ """Stops the adb logcat collection subprocess.""" if not self.is_adb_logcat_on: self.log.warning( - "Android device %s does not have an ongoing adb logcat " % self.serial + f"Android device {self.serial} does not have an ongoing adb logcat " ) return # Set the last timestamp to the current timestamp. This may cause @@ -995,7 +989,7 @@ Linux UID for the apk. """ output = self.adb.shell( - "dumpsys package %s | grep userId=" % apk_name, ignore_status=True + f"dumpsys package {apk_name} | grep userId=", ignore_status=True ) result = re.search(r"userId=(\d+)", output) if result: @@ -1014,7 +1008,7 @@ """ try: output = self.adb.shell( - "dumpsys package %s | grep versionName" % package_name + f"dumpsys package {package_name} | grep versionName" ) pattern = re.compile(r"versionName=(.+)", re.I) result = pattern.findall(output) @@ -1040,7 +1034,7 @@ try: return bool( self.adb.shell( - '(pm list packages | grep -w "package:%s") || true' % package_name + f'(pm list packages | grep -w "package:{package_name}") || true' ) ) @@ -1067,7 +1061,7 @@ for cmd in ("ps -A", "ps"): try: out = self.adb.shell( - '%s | grep "S %s"' % (cmd, package_name), ignore_status=True + f'{cmd} | grep "S {package_name}"', ignore_status=True ) if package_name in out: self.log.info("apk %s is running", package_name) @@ -1096,7 +1090,7 @@ True if package is installed. False otherwise. 
""" try: - self.adb.shell("am force-stop %s" % package_name, ignore_status=True) + self.adb.shell(f"am force-stop {package_name}", ignore_status=True) except Exception as e: self.log.warning("Fail to stop package %s: %s", package_name, e) @@ -1124,8 +1118,8 @@ time_stamp = acts_logger.normalize_log_line_timestamp( acts_logger.epoch_to_log_line_timestamp(epoch) ) - out_name = "AndroidDevice%s_%s" % (self.serial, time_stamp) - out_name = "%s.zip" % out_name if new_br else "%s.txt" % out_name + out_name = f"AndroidDevice{self.serial}_{time_stamp}" + out_name = f"{out_name}.zip" if new_br else f"{out_name}.txt" full_out_path = os.path.join(br_path, out_name) # in case device restarted, wait for adb interface to return self.wait_for_boot_completion() @@ -1137,15 +1131,13 @@ out = self.adb.shell("bugreportz", timeout=BUG_REPORT_TIMEOUT) if not out.startswith("OK"): raise errors.AndroidDeviceError( - "Failed to take bugreport on %s: %s" % (self.serial, out), + f"Failed to take bugreport on {self.serial}: {out}", serial=self.serial, ) br_out_path = out.split(":")[1].strip().split()[0] - self.adb.pull("%s %s" % (br_out_path, full_out_path)) + self.adb.pull(f"{br_out_path} {full_out_path}") else: - self.adb.bugreport( - " > {}".format(full_out_path), timeout=BUG_REPORT_TIMEOUT - ) + self.adb.bugreport(f" > {full_out_path}", timeout=BUG_REPORT_TIMEOUT) if test_name: self.log.info("Bugreport for %s taken at %s.", test_name, full_out_path) else: @@ -1156,15 +1148,15 @@ self, directory, begin_time=None, skip_files=[], match_string=None ): """Get files names with provided directory.""" - cmd = "find %s -type f" % directory + cmd = f"find {directory} -type f" if begin_time: current_time = utils.get_current_epoch_time() seconds = int(math.ceil((current_time - begin_time) / 1000.0)) - cmd = "%s -mtime -%ss" % (cmd, seconds) + cmd = f"{cmd} -mtime -{seconds}s" if match_string: - cmd = "%s -iname %s" % (cmd, match_string) + cmd = f"{cmd} -iname {match_string}" for skip_file in skip_files: - cmd = "%s ! -iname %s" % (cmd, skip_file) + cmd = f"{cmd} ! -iname {skip_file}" out = self.adb.shell(cmd, ignore_status=True) if ( not out @@ -1190,7 +1182,7 @@ Args: file_path: The path of the file to check for. 
""" - cmd = "(test -f %s && echo yes) || echo no" % file_path + cmd = f"(test -f {file_path} && echo yes) || echo no" result = self.adb.shell(cmd) if result == "yes": return True @@ -1213,8 +1205,8 @@ if not host_path: host_path = self.log_path for device_path in device_paths: - self.log.info("Pull from device: %s -> %s" % (device_path, host_path)) - self.adb.pull("%s %s" % (device_path, host_path), timeout=PULL_TIMEOUT) + self.log.info(f"Pull from device: {device_path} -> {host_path}") + self.adb.pull(f"{device_path} {host_path}", timeout=PULL_TIMEOUT) def check_crash_report( self, test_name=None, begin_time=None, log_crash_report=False @@ -1223,7 +1215,7 @@ crash_reports = [] for crash_path in CRASH_REPORT_PATHS: try: - cmd = "cd %s" % crash_path + cmd = f"cd {crash_path}" self.adb.shell(cmd) except Exception as e: self.log.debug("received exception %s", e) @@ -1235,14 +1227,14 @@ tombstones = crashes[:] for tombstone in tombstones: if self.adb.shell( - 'cat %s | grep "crash_dump failed to dump process"' % tombstone + f'cat {tombstone} | grep "crash_dump failed to dump process"' ): crashes.remove(tombstone) if crashes: crash_reports.extend(crashes) if crash_reports and log_crash_report: crash_log_path = os.path.join( - self.device_log_path, "Crashes_%s" % self.serial + self.device_log_path, f"Crashes_{self.serial}" ) os.makedirs(crash_log_path, exist_ok=True) self.pull_files(crash_reports, crash_log_path) @@ -1257,31 +1249,28 @@ log_path, begin_time=begin_time, match_string="*.qmdl" ) if qxdm_logs: - qxdm_log_path = os.path.join(self.device_log_path, "QXDM_%s" % self.serial) + qxdm_log_path = os.path.join(self.device_log_path, f"QXDM_{self.serial}") os.makedirs(qxdm_log_path, exist_ok=True) self.log.info("Pull QXDM Log %s to %s", qxdm_logs, qxdm_log_path) self.pull_files(qxdm_logs, qxdm_log_path) self.adb.pull( - "/firmware/image/qdsp6m.qdb %s" % qxdm_log_path, + f"/firmware/image/qdsp6m.qdb {qxdm_log_path}", timeout=PULL_TIMEOUT, ignore_status=True, ) # Zip Folder - utils.zip_directory("%s.zip" % qxdm_log_path, qxdm_log_path) + utils.zip_directory(f"{qxdm_log_path}.zip", qxdm_log_path) shutil.rmtree(qxdm_log_path) else: - self.log.error("Didn't find QXDM logs in %s." % log_path) + self.log.error(f"Didn't find QXDM logs in {log_path}.") if "Verizon" in self.adb.getprop("gsm.sim.operator.alpha"): - omadm_log_path = os.path.join( - self.device_log_path, "OMADM_%s" % self.serial - ) + omadm_log_path = os.path.join(self.device_log_path, f"OMADM_{self.serial}") os.makedirs(omadm_log_path, exist_ok=True) self.log.info("Pull OMADM Log") self.adb.pull( - "/data/data/com.android.omadm.service/files/dm/log/ %s" - % omadm_log_path, + f"/data/data/com.android.omadm.service/files/dm/log/ {omadm_log_path}", timeout=PULL_TIMEOUT, ignore_status=True, ) @@ -1300,21 +1289,18 @@ path, begin_time=begin_time, match_string="*.sdm*" ) if sdm_logs: - sdm_log_path = os.path.join(self.device_log_path, "SDM_%s" % self.serial) + sdm_log_path = os.path.join(self.device_log_path, f"SDM_{self.serial}") os.makedirs(sdm_log_path, exist_ok=True) self.log.info("Pull SDM Log %s to %s", sdm_logs, sdm_log_path) self.pull_files(sdm_logs, sdm_log_path) else: - self.log.error("Didn't find SDM logs in %s." 
% log_paths) + self.log.error(f"Didn't find SDM logs in {log_paths}.") if "Verizon" in self.adb.getprop("gsm.sim.operator.alpha"): - omadm_log_path = os.path.join( - self.device_log_path, "OMADM_%s" % self.serial - ) + omadm_log_path = os.path.join(self.device_log_path, f"OMADM_{self.serial}") os.makedirs(omadm_log_path, exist_ok=True) self.log.info("Pull OMADM Log") self.adb.pull( - "/data/data/com.android.omadm.service/files/dm/log/ %s" - % omadm_log_path, + f"/data/data/com.android.omadm.service/files/dm/log/ {omadm_log_path}", timeout=PULL_TIMEOUT, ignore_status=True, ) @@ -1361,9 +1347,9 @@ log_file_path: The complete file path to log the results. """ - cmd = "iperf3 -c {} {}".format(server_host, extra_args) + cmd = f"iperf3 -c {server_host} {extra_args}" if log_file_path: - cmd += " --logfile {} &".format(log_file_path) + cmd += f" --logfile {log_file_path} &" self.adb.shell_nb(cmd) def run_iperf_client(self, server_host, extra_args="", timeout=IPERF_TIMEOUT): @@ -1381,9 +1367,7 @@ status: true if iperf client start successfully. results: results have data flow information """ - out = self.adb.shell( - "iperf3 -c {} {}".format(server_host, extra_args), timeout=timeout - ) + out = self.adb.shell(f"iperf3 -c {server_host} {extra_args}", timeout=timeout) clean_out = out.split("\n") if "error" in clean_out[0].lower(): return False, clean_out @@ -1401,7 +1385,7 @@ status: true if iperf server started successfully. results: results have output of command """ - out = self.adb.shell("iperf3 -s {}".format(extra_args)) + out = self.adb.shell(f"iperf3 -s {extra_args}") clean_out = out.split("\n") if "error" in clean_out[0].lower(): return False, clean_out @@ -1431,7 +1415,7 @@ pass time.sleep(5) raise errors.AndroidDeviceError( - "Device %s booting process timed out." % self.serial, serial=self.serial + f"Device {self.serial} booting process timed out.", serial=self.serial ) def reboot( @@ -1503,11 +1487,11 @@ def get_ipv4_address(self, interface="wlan0", timeout=5): for timer in range(0, timeout): try: - ip_string = self.adb.shell("ifconfig %s|grep inet" % interface) + ip_string = self.adb.shell(f"ifconfig {interface}|grep inet") break except adb.AdbError as e: if timer + 1 == timeout: - self.log.warning("Unable to find IP address for %s." % interface) + self.log.warning(f"Unable to find IP address for {interface}.") return None else: time.sleep(1) @@ -1545,7 +1529,7 @@ return None def send_keycode(self, keycode): - self.adb.shell("input keyevent KEYCODE_%s" % keycode) + self.adb.shell(f"input keyevent KEYCODE_{keycode}") def get_my_current_focus_window(self): """Get the current focus window on screen""" @@ -1667,7 +1651,7 @@ self.send_keycode("SLEEP") def send_keycode_number_pad(self, number): - self.send_keycode("NUMPAD_%s" % number) + self.send_keycode(f"NUMPAD_{number}") def unlock_screen(self, password=None): self.log.info("Unlocking with %s", password or "swipe up") @@ -1691,12 +1675,12 @@ name: additional information of screenshot on the file name. 
""" if name: - file_name = "%s_%s" % (DEFAULT_SCREENSHOT_PATH, name) - file_name = "%s_%s.png" % (file_name, utils.get_current_epoch_time()) + file_name = f"{DEFAULT_SCREENSHOT_PATH}_{name}" + file_name = f"{file_name}_{utils.get_current_epoch_time()}.png" self.ensure_screen_on() self.log.info("Log screenshot to %s", file_name) try: - self.adb.shell("screencap -p %s" % file_name) + self.adb.shell(f"screencap -p {file_name}") except: self.log.error("Fail to log screenshot to %s", file_name) @@ -1712,7 +1696,7 @@ "am start -a com.android.setupwizard.EXIT", ignore_status=True ) self.adb.shell( - "pm disable %s" % self.get_setupwizard_package_name(), + f"pm disable {self.get_setupwizard_package_name()}", ignore_status=True, ) # Wait up to 5 seconds for user_setup_complete to be updated @@ -1744,8 +1728,8 @@ ) wizard_package = package.split("=")[1] activity = package.split("=")[0].split("/")[-2] - self.log.info("%s/.%sActivity" % (wizard_package, activity)) - return "%s/.%sActivity" % (wizard_package, activity) + self.log.info(f"{wizard_package}/.{activity}Activity") + return f"{wizard_package}/.{activity}Activity" def push_system_file(self, src_file_path, dst_file_path, push_timeout=300): """Pushes a file onto the read-only file system. @@ -1765,7 +1749,7 @@ self.ensure_verity_disabled() self.adb.remount() out = self.adb.push( - "%s %s" % (src_file_path, dst_file_path), timeout=push_timeout + f"{src_file_path} {dst_file_path}", timeout=push_timeout ) if "error" in out: self.log.error( @@ -1819,5 +1803,5 @@ class AndroidDeviceLoggerAdapter(logging.LoggerAdapter): def process(self, msg, kwargs): - msg = "[AndroidDevice|%s] %s" % (self.extra["serial"], msg) + msg = f"[AndroidDevice|{self.extra['serial']}] {msg}" return (msg, kwargs)
diff --git a/src/antlion/controllers/android_lib/__init__.py b/packages/antlion/controllers/android_lib/__init__.py
similarity index 100%
rename from src/antlion/controllers/android_lib/__init__.py
rename to packages/antlion/controllers/android_lib/__init__.py
diff --git a/src/antlion/controllers/android_lib/errors.py b/packages/antlion/controllers/android_lib/errors.py
similarity index 100%
rename from src/antlion/controllers/android_lib/errors.py
rename to packages/antlion/controllers/android_lib/errors.py
diff --git a/src/antlion/controllers/android_lib/events.py b/packages/antlion/controllers/android_lib/events.py
similarity index 100%
rename from src/antlion/controllers/android_lib/events.py
rename to packages/antlion/controllers/android_lib/events.py
diff --git a/src/antlion/controllers/android_lib/logcat.py b/packages/antlion/controllers/android_lib/logcat.py
similarity index 92%
rename from src/antlion/controllers/android_lib/logcat.py
rename to packages/antlion/controllers/android_lib/logcat.py
index 0a5e8f7..4aab7d0 100644
--- a/src/antlion/controllers/android_lib/logcat.py
+++ b/packages/antlion/controllers/android_lib/logcat.py
@@ -17,9 +17,9 @@ import logging import re -from antlion.libs.proc.process import Process from antlion.libs.logging import log_stream from antlion.libs.logging.log_stream import LogStyles +from antlion.libs.proc.process import Process TIMESTAMP_REGEX = r"((?:\d+-)?\d+-\d+ \d+:\d+:\d+.\d+)" @@ -73,11 +73,7 @@ begin_at = '"%s"' % (timestamp_tracker.last_timestamp or 1) additional_params = extra_params or "" - return "adb -s %s logcat -T %s -v year %s" % ( - serial, - begin_at, - additional_params, - ) + return f"adb -s {serial} logcat -T {begin_at} -v year {additional_params}" return on_retry @@ -94,12 +90,12 @@ A acts.libs.proc.process.Process object. """ logger = log_stream.create_logger( - "adblog_%s" % serial, + f"adblog_{serial}", log_name=serial, subcontext=logcat_dir, log_styles=(LogStyles.LOG_DEBUG | LogStyles.TESTCASE_LOG), ) - process = Process("adb -s %s logcat -T 1 -v year %s" % (serial, extra_params)) + process = Process(f"adb -s {serial} logcat -T 1 -v year {extra_params}") timestamp_tracker = TimestampTracker() process.set_on_output_callback(_log_line_func(logger, timestamp_tracker)) process.set_on_terminate_callback(
diff --git a/src/antlion/controllers/android_lib/services.py b/packages/antlion/controllers/android_lib/services.py
similarity index 100%
rename from src/antlion/controllers/android_lib/services.py
rename to packages/antlion/controllers/android_lib/services.py
diff --git a/src/antlion/controllers/ap_lib/__init__.py b/packages/antlion/controllers/ap_lib/__init__.py
similarity index 100%
rename from src/antlion/controllers/ap_lib/__init__.py
rename to packages/antlion/controllers/ap_lib/__init__.py
diff --git a/src/antlion/controllers/ap_lib/ap_get_interface.py b/packages/antlion/controllers/ap_lib/ap_get_interface.py
similarity index 80%
rename from src/antlion/controllers/ap_lib/ap_get_interface.py
rename to packages/antlion/controllers/ap_lib/ap_get_interface.py
index 74a6d2c..9028ded 100644
--- a/src/antlion/controllers/ap_lib/ap_get_interface.py
+++ b/packages/antlion/controllers/ap_lib/ap_get_interface.py
@@ -15,10 +15,9 @@ # limitations under the License. import logging +from typing import TYPE_CHECKING -from typing import List, Optional, Tuple, TYPE_CHECKING - -from antlion.libs.proc import job +from antlion.runner import CalledProcessError if TYPE_CHECKING: from antlion.controllers.access_point import AccessPoint @@ -36,7 +35,7 @@ """Class to get network interface information for the device.""" def __init__( - self, ap: "AccessPoint", wan_interface_override: Optional[str] = None + self, ap: "AccessPoint", wan_interface_override: str | None = None ) -> None: """Initialize the ApInterface class. @@ -47,32 +46,33 @@ self.ssh = ap.ssh self.wan_interface_override = wan_interface_override - def get_all_interface(self) -> List[str]: + def get_all_interface(self) -> list[str]: """Get all network interfaces on the device. Returns: interfaces_all: list of all the network interfaces on device """ output = self.ssh.run(GET_ALL_INTERFACE) - interfaces_all = output.stdout.split("\n") + interfaces_all = output.stdout.decode("utf-8").split("\n") return interfaces_all - def get_virtual_interface(self) -> List[str]: + def get_virtual_interface(self) -> list[str]: """Get all virtual interfaces on the device. Returns: interfaces_virtual: list of all the virtual interfaces on device """ output = self.ssh.run(GET_VIRTUAL_INTERFACE) - interfaces_virtual = output.stdout.split("\n") + interfaces_virtual = output.stdout.decode("utf-8").split("\n") return interfaces_virtual - def get_physical_interface(self) -> List[str]: + def get_physical_interface(self) -> list[str]: """Get all the physical interfaces of the device. Get all physical interfaces such as eth ports and wlan ports + Returns: interfaces_phy: list of all the physical interfaces """ @@ -82,26 +82,29 @@ return interfaces_phy - def get_bridge_interface(self) -> Optional[List[str]]: + def get_bridge_interface(self) -> list[str]: """Get all the bridge interfaces of the device. Returns: interfaces_bridge: the list of bridge interfaces, return None if bridge utility is not available on the device + + Raises: + ApInterfaceError: Failing to run brctl """ - interfaces_bridge = [] try: output = self.ssh.run(BRCTL_SHOW) - lines = output.stdout.split("\n") - for line in lines: - interfaces_bridge.append(line.split("\t")[0]) - interfaces_bridge.pop(0) - return [x for x in interfaces_bridge if x != ""] - except job.Error: - logging.info("No brctl utility is available") - return None + except CalledProcessError as e: + raise ApInterfacesError(f'failed to execute "{BRCTL_SHOW}"') from e - def get_wlan_interface(self) -> Tuple[str, str]: + lines = output.stdout.decode("utf-8").split("\n") + interfaces_bridge = [] + for line in lines: + interfaces_bridge.append(line.split("\t")[0]) + interfaces_bridge.pop(0) + return [x for x in interfaces_bridge if x != ""] + + def get_wlan_interface(self) -> tuple[str, str]: """Get all WLAN interfaces and specify 2.4 GHz and 5 GHz interfaces. 
Returns: @@ -114,9 +117,9 @@ interfaces_phy = self.get_physical_interface() for iface in interfaces_phy: output = self.ssh.run(f"iwlist {iface} freq") - if "Channel 06" in output.stdout and "Channel 36" not in output.stdout: + if b"Channel 06" in output.stdout and b"Channel 36" not in output.stdout: wlan_2g = iface - elif "Channel 36" in output.stdout and "Channel 06" not in output.stdout: + elif b"Channel 36" in output.stdout and b"Channel 06" not in output.stdout: wlan_5g = iface if wlan_2g is None or wlan_5g is None: @@ -149,12 +152,12 @@ return wan output = self.ssh.run("ifconfig") - interfaces_all = output.stdout.split("\n") + interfaces_all = output.stdout.decode("utf-8").split("\n") logging.info(f"IFCONFIG output = {interfaces_all}") raise ApInterfacesError("No WAN interface available") - def get_lan_interface(self) -> Optional[str]: + def get_lan_interface(self) -> str | None: """Get the LAN interface connecting to local devices. Returns: @@ -169,7 +172,7 @@ interfaces_eth.remove(interface_wan) for iface in interfaces_eth: output = self.ssh.run(f"ifconfig {iface}") - if "RUNNING" in output.stdout: + if b"RUNNING" in output.stdout: lan = iface break return lan @@ -185,5 +188,5 @@ try: self.ssh.run(f"ping -c 3 -I {iface} 8.8.8.8") return 1 - except job.Error: + except CalledProcessError: return 0
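The interface helpers above now treat ssh.run() output as bytes (a subprocess.CompletedProcess[bytes]), hence the .decode("utf-8") calls and b"..." comparisons. A local stand-in for that handling, using ip(8) in place of the AP-side command (the command choice here is illustrative only):

import subprocess

# stdout is bytes, so it has to be decoded before splitting into interface
# names, mirroring the patched get_all_interface()/get_virtual_interface().
proc = subprocess.run(["ip", "-o", "link"], capture_output=True, check=True)
interfaces = [
    line.split(":")[1].strip()  # second field of "N: name: <flags> ..."
    for line in proc.stdout.decode("utf-8").splitlines()
    if line.strip()
]
print(interfaces)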
diff --git a/src/antlion/controllers/ap_lib/ap_iwconfig.py b/packages/antlion/controllers/ap_lib/ap_iwconfig.py
similarity index 88%
rename from src/antlion/controllers/ap_lib/ap_iwconfig.py
rename to packages/antlion/controllers/ap_lib/ap_iwconfig.py
index 225a397..d5b4556 100644
--- a/src/antlion/controllers/ap_lib/ap_iwconfig.py
+++ b/packages/antlion/controllers/ap_lib/ap_iwconfig.py
@@ -14,9 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional, TYPE_CHECKING - -from antlion.libs.proc.job import Result +import subprocess +from typing import TYPE_CHECKING if TYPE_CHECKING: from antlion.controllers.access_point import AccessPoint @@ -40,8 +39,8 @@ self.ssh = ap.ssh def ap_iwconfig( - self, interface: str, arguments: Optional[str] = None - ) -> Optional[Result]: + self, interface: str, arguments: str | None = None + ) -> subprocess.CompletedProcess[bytes]: """Configure the wireless interface using iwconfig. Returns:
diff --git a/src/antlion/controllers/ap_lib/bridge_interface.py b/packages/antlion/controllers/ap_lib/bridge_interface.py
similarity index 75%
rename from src/antlion/controllers/ap_lib/bridge_interface.py
rename to packages/antlion/controllers/ap_lib/bridge_interface.py
index ee4733e..383d289 100644
--- a/src/antlion/controllers/ap_lib/bridge_interface.py
+++ b/packages/antlion/controllers/ap_lib/bridge_interface.py
@@ -16,13 +16,14 @@ import logging import time -from antlion.libs.proc import job + +from antlion.runner import CalledProcessError _BRCTL = "brctl" BRIDGE_NAME = "br-lan" -CREATE_BRIDGE = "%s addbr %s" % (_BRCTL, BRIDGE_NAME) -DELETE_BRIDGE = "%s delbr %s" % (_BRCTL, BRIDGE_NAME) -BRING_DOWN_BRIDGE = "ifconfig %s down" % BRIDGE_NAME +CREATE_BRIDGE = f"{_BRCTL} addbr {BRIDGE_NAME}" +DELETE_BRIDGE = f"{_BRCTL} delbr {BRIDGE_NAME}" +BRING_DOWN_BRIDGE = f"ifconfig {BRIDGE_NAME} down" class BridgeInterfaceConfigs(object): @@ -64,35 +65,29 @@ # Create the bridge try: self.ssh.run(CREATE_BRIDGE) - except job.Error: + except CalledProcessError: logging.warning( - "Bridge interface {} already exists, no action needed".format( - BRIDGE_NAME - ) + f"Bridge interface {BRIDGE_NAME} already exists, no action needed" ) # Enable 4addr mode on for the wlan interface - ENABLE_4ADDR = "iw dev %s set 4addr on" % (brconfigs.iface_wlan) + ENABLE_4ADDR = f"iw dev {brconfigs.iface_wlan} set 4addr on" try: self.ssh.run(ENABLE_4ADDR) - except job.Error: - logging.warning( - "4addr is already enabled on {}".format(brconfigs.iface_wlan) - ) + except CalledProcessError: + logging.warning(f"4addr is already enabled on {brconfigs.iface_wlan}") # Add both LAN and WLAN interfaces to the bridge interface for interface in [brconfigs.iface_lan, brconfigs.iface_wlan]: - ADD_INTERFACE = "%s addif %s %s" % (_BRCTL, BRIDGE_NAME, interface) + ADD_INTERFACE = f"{_BRCTL} addif {BRIDGE_NAME} {interface}" try: self.ssh.run(ADD_INTERFACE) - except job.Error: - logging.warning( - "{} has already been added to {}".format(interface, BRIDGE_NAME) - ) + except CalledProcessError: + logging.warning(f"{interface} has already been added to {BRIDGE_NAME}") time.sleep(5) # Set IP address on the bridge interface to bring it up - SET_BRIDGE_IP = "ifconfig %s %s" % (BRIDGE_NAME, brconfigs.bridge_ip) + SET_BRIDGE_IP = f"ifconfig {BRIDGE_NAME} {brconfigs.bridge_ip}" self.ssh.run(SET_BRIDGE_IP) time.sleep(2) @@ -112,10 +107,10 @@ self.ssh.run(DELETE_BRIDGE) # Bring down wlan interface and disable 4addr mode - BRING_DOWN_WLAN = "ifconfig %s down" % brconfigs.iface_wlan + BRING_DOWN_WLAN = f"ifconfig {brconfigs.iface_wlan} down" self.ssh.run(BRING_DOWN_WLAN) time.sleep(2) - DISABLE_4ADDR = "iw dev %s set 4addr off" % (brconfigs.iface_wlan) + DISABLE_4ADDR = f"iw dev {brconfigs.iface_wlan} set 4addr off" self.ssh.run(DISABLE_4ADDR) time.sleep(1) logging.info("Bridge interface is down")
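The bridge setup above now distinguishes "already configured" from a real failure by catching CalledProcessError and logging a warning. A standalone sketch of that pattern; run_idempotent is a made-up name, the real code goes through the AP's ssh runner rather than a local subprocess, and the example assumes brctl is installed and runnable.

import logging
import subprocess

def run_idempotent(cmd: list[str], already_done_msg: str) -> None:
    """Runs a setup command, treating a non-zero exit as "already configured"."""
    try:
        subprocess.run(cmd, check=True, capture_output=True)
    except subprocess.CalledProcessError:
        # Same shape as the CalledProcessError handling added around brctl/iw.
        logging.warning(already_done_msg)

run_idempotent(
    ["brctl", "addbr", "br-lan"],
    "Bridge interface br-lan already exists, no action needed",
)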
diff --git a/src/antlion/controllers/ap_lib/dhcp_config.py b/packages/antlion/controllers/ap_lib/dhcp_config.py
similarity index 84%
rename from src/antlion/controllers/ap_lib/dhcp_config.py
rename to packages/antlion/controllers/ap_lib/dhcp_config.py
index a50b6d0..5fa8cf0 100644
--- a/src/antlion/controllers/ap_lib/dhcp_config.py
+++ b/packages/antlion/controllers/ap_lib/dhcp_config.py
@@ -13,6 +13,7 @@ # limitations under the License. import copy +from ipaddress import IPv4Address, IPv4Network _ROUTER_DNS = "8.8.8.8, 4.4.4.4" @@ -32,13 +33,13 @@ def __init__( self, - subnet, - start=None, - end=None, - router=None, - lease_time=None, - additional_parameters={}, - additional_options={}, + subnet: IPv4Network, + start: IPv4Address | None = None, + end: IPv4Address | None = None, + router: IPv4Address | None = None, + lease_time: int | None = None, + additional_parameters: dict[str, str] = {}, + additional_options: dict[str, int | str] = {}, ): """ Args: @@ -96,13 +97,12 @@ # configuration. The improved logic that we can use is: # a) erroring out if start and end encompass the whole network, and # b) picking any address before self.start or after self.end. - self.router = None for host in self.network.hosts(): if host < self.start or host > self.end: self.router = host break - if not self.router: + if not hasattr(self, "router"): raise ValueError("No useable host found.") self.lease_time = lease_time @@ -157,9 +157,9 @@ lines = [] if self.default_lease_time: - lines.append("default-lease-time %d;" % self.default_lease_time) + lines.append(f"default-lease-time {self.default_lease_time};") if self.max_lease_time: - lines.append("max-lease-time %s;" % self.max_lease_time) + lines.append(f"max-lease-time {self.max_lease_time};") for subnet in self.subnets: address = subnet.network.network_address @@ -173,31 +173,31 @@ lines.append("subnet %s netmask %s {" % (address, mask)) lines.append("\tpool {") - lines.append("\t\toption subnet-mask %s;" % mask) - lines.append("\t\toption routers %s;" % router) - lines.append("\t\trange %s %s;" % (start, end)) + lines.append(f"\t\toption subnet-mask {mask};") + lines.append(f"\t\toption routers {router};") + lines.append(f"\t\trange {start} {end};") if lease_time: - lines.append("\t\tdefault-lease-time %d;" % lease_time) - lines.append("\t\tmax-lease-time %d;" % lease_time) + lines.append(f"\t\tdefault-lease-time {lease_time};") + lines.append(f"\t\tmax-lease-time {lease_time};") for param, value in additional_parameters.items(): - lines.append("\t\t%s %s;" % (param, value)) + lines.append(f"\t\t{param} {value};") for option, value in additional_options.items(): - lines.append("\t\toption %s %s;" % (option, value)) + lines.append(f"\t\toption {option} {value};") lines.append("\t}") lines.append("}") for mapping in self.static_mappings: identifier = mapping.identifier fixed_address = mapping.ipv4_address - host_fake_name = "host%s" % identifier.replace(":", "") + host_fake_name = f"host{identifier.replace(':', '')}" lease_time = mapping.lease_time lines.append("host %s {" % host_fake_name) - lines.append("\thardware ethernet %s;" % identifier) - lines.append("\tfixed-address %s;" % fixed_address) + lines.append(f"\thardware ethernet {identifier};") + lines.append(f"\tfixed-address {fixed_address};") if lease_time: - lines.append("\tdefault-lease-time %d;" % lease_time) - lines.append("\tmax-lease-time %d;" % lease_time) + lines.append(f"\tdefault-lease-time {lease_time};") + lines.append(f"\tmax-lease-time {lease_time};") lines.append("}") config_str = "\n".join(lines)
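With the tightened type hints above, Subnet now takes an ipaddress.IPv4Network rather than a string, and the router is only derived when one was not passed in (hence the hasattr check replacing the old `self.router = None`). A small usage sketch, assuming antlion is importable and that start/end default to values derived from the network (their defaults sit outside this hunk); the printed attributes are the ones the class itself references.

from ipaddress import IPv4Network

from antlion.controllers.ap_lib.dhcp_config import Subnet

# When start/end/router are omitted, the class derives them from the network,
# picking a router address that falls outside the start..end pool.
subnet = Subnet(IPv4Network("192.168.9.0/24"))
print(subnet.network, subnet.start, subnet.end, subnet.router)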
diff --git a/src/antlion/controllers/ap_lib/dhcp_server.py b/packages/antlion/controllers/ap_lib/dhcp_server.py
similarity index 80%
rename from src/antlion/controllers/ap_lib/dhcp_server.py
rename to packages/antlion/controllers/ap_lib/dhcp_server.py
index c52983b..dd3f608 100644
--- a/src/antlion/controllers/ap_lib/dhcp_server.py
+++ b/packages/antlion/controllers/ap_lib/dhcp_server.py
@@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import time +from mobly import logger from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed from antlion.controllers.ap_lib.dhcp_config import DhcpConfig from antlion.controllers.utils_lib.commands import shell -from antlion import logger +from antlion.runner import Runner class Error(Exception): @@ -40,7 +42,7 @@ PROGRAM_FILE = "dhcpd" - def __init__(self, runner, interface, working_dir="/tmp"): + def __init__(self, runner: Runner, interface: str, working_dir: str = "/tmp"): """ Args: runner: Object that has a run_async and run methods for running @@ -48,16 +50,21 @@ interface: string, The name of the interface to use. working_dir: The directory to work out of. """ - self._log = logger.create_logger(lambda msg: f"[DHCP Server|{interface}] {msg}") + self._log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[DHCP Server|{interface}]", + }, + ) self._runner = runner self._working_dir = working_dir - self._shell = shell.ShellCommand(runner, working_dir) - self._stdio_log_file = f"dhcpd_{interface}.log" - self._config_file = f"dhcpd_{interface}.conf" - self._lease_file = f"dhcpd_{interface}.leases" - self._pid_file = f"dhcpd_{interface}.pid" - self._identifier = f"{self.PROGRAM_FILE}.*{self._config_file}" + self._shell = shell.ShellCommand(runner) + self._stdio_log_file = f"{working_dir}/dhcpd_{interface}.log" + self._config_file = f"{working_dir}/dhcpd_{interface}.conf" + self._lease_file = f"{working_dir}/dhcpd_{interface}.leases" + self._pid_file = f"{working_dir}/dhcpd_{interface}.pid" + self._identifier: int | None = None # There is a slight timing issue where if the proc filesystem in Linux # doesn't get updated in time as when this is called, the NoInterfaceError @@ -98,29 +105,32 @@ base_command = f'cd "{self._working_dir}"; {dhcpd_command}' job_str = f'{base_command} > "{self._stdio_log_file}" 2>&1' - self._runner.run_async(job_str) + self._identifier = int(self._runner.run_async(job_str).stdout) try: self._wait_for_process(timeout=timeout_sec) self._wait_for_server(timeout=timeout_sec) except: - self._log.warn("Failed to start DHCP server.") - self._log.info("DHCP configuration:\n" + config.render_config_file() + "\n") - self._log.info("DHCP logs:\n" + self.get_logs() + "\n") + self._log.warning("Failed to start DHCP server.") + self._log.info(f"DHCP configuration:\n{config.render_config_file()}\n") + self._log.info(f"DHCP logs:\n{self.get_logs()}\n") self.stop() raise - def stop(self): + def stop(self) -> None: """Kills the daemon if it is running.""" - if self.is_alive(): + if self._identifier and self.is_alive(): self._shell.kill(self._identifier) + self._identifier = None - def is_alive(self): + def is_alive(self) -> bool: """ Returns: True if the daemon is running. """ - return self._shell.is_alive(self._identifier) + if self._identifier: + return self._shell.is_alive(self._identifier) + return False def get_logs(self) -> str: """Pulls the log files from where dhcp server is running. @@ -130,7 +140,7 @@ """ return self._shell.read_file(self._stdio_log_file) - def _wait_for_process(self, timeout=60): + def _wait_for_process(self, timeout: float = 60) -> None: """Waits for the process to come up. 
Waits until the dhcp server process is found running, or there is @@ -146,7 +156,7 @@ self._scan_for_errors(True) - def _wait_for_server(self, timeout=60): + def _wait_for_server(self, timeout: float = 60) -> None: """Waits for dhcp server to report that the server is up. Waits until dhcp server says the server has been brought up or an @@ -164,7 +174,7 @@ self._scan_for_errors(True) - def _scan_for_errors(self, should_be_up): + def _scan_for_errors(self, should_be_up: bool) -> None: """Scans the dhcp server log for any errors. Args: @@ -195,7 +205,7 @@ if should_be_up and is_dead: raise Error("Dhcp server failed to start.", self) - def _write_configs(self, config): + def _write_configs(self, config: DhcpConfig) -> None: """Writes the configs to the dhcp server config file.""" self._shell.delete_file(self._config_file) config_str = config.render_config_file()
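dhcp_server.py now imports tenacity's retry helpers to paper over the /proc timing issue mentioned in its comment above. The decorator arguments actually applied are outside this hunk, so the following is only an illustrative pattern with made-up values and a stand-in for the module's NoInterfaceError.

from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed

class NoInterfaceError(Exception):
    """Stand-in for the dhcp_server error raised while /proc catches up."""

attempts = 0

@retry(
    retry=retry_if_exception_type(NoInterfaceError),  # only this error triggers a retry
    stop=stop_after_attempt(3),                       # at most 3 attempts
    wait=wait_fixed(1),                               # 1 second between attempts
)
def start_daemon() -> str:
    global attempts
    attempts += 1
    if attempts < 3:
        raise NoInterfaceError("interface has no IP address yet")
    return "started"

print(start_daemon())  # succeeds on the third attempt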
diff --git a/src/antlion/controllers/ap_lib/extended_capabilities.py b/packages/antlion/controllers/ap_lib/extended_capabilities.py
similarity index 97%
rename from src/antlion/controllers/ap_lib/extended_capabilities.py
rename to packages/antlion/controllers/ap_lib/extended_capabilities.py
index 82029cc..4570409 100644
--- a/src/antlion/controllers/ap_lib/extended_capabilities.py
+++ b/packages/antlion/controllers/ap_lib/extended_capabilities.py
@@ -15,7 +15,6 @@ # limitations under the License. from enum import IntEnum, unique -from typing import Tuple @unique @@ -119,7 +118,7 @@ # 88-n reserved -def _offsets(ext_cap_offset: ExtendedCapability) -> Tuple[int, int]: +def _offsets(ext_cap_offset: ExtendedCapability) -> tuple[int, int]: """For given capability, return the byte and bit offsets within the field. 802.11 divides the extended capability field into bytes, as does the @@ -166,7 +165,7 @@ ExtendedCapability.MAX_NUMBER_OF_MSDUS_IN_A_MSDU, ]: raise NotImplementedError( - f"{ext_cap.name} not implemented yet by {__class__}" + f"{ext_cap.name} not implemented yet by {self.__class__}" ) byte_offset, bit_offset = _offsets(ext_cap) if len(self._ext_cap) > byte_offset:
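_offsets() above maps a capability's bit position to a (byte, bit) pair within the 802.11 Extended Capabilities field; its body is outside this hunk, but the per-byte layout makes it a divmod by 8. A worked example; the hex string is made up for illustration and the helper name is a stand-in.

def offsets(bit_index: int) -> tuple[int, int]:
    """Presumed equivalent of _offsets(): eight capability bits per byte."""
    return divmod(bit_index, 8)

# BSS Transition is bit 19 of the Extended Capabilities element,
# i.e. byte 2, bit 3 of the field.
byte_offset, bit_offset = offsets(19)
assert (byte_offset, bit_offset) == (2, 3)

# Checking the bit in a raw ext_capab byte string (as parsed from hostapd's
# "ext_capab=..." hex output elsewhere in this patch):
ext_cap = bytes.fromhex("0000080000000040")
supported = bool(ext_cap[byte_offset] & (1 << bit_offset))
print(supported)  # True: 0x08 in byte 2 has bit 3 set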
diff --git a/src/antlion/controllers/ap_lib/hostapd.py b/packages/antlion/controllers/ap_lib/hostapd.py
similarity index 71%
rename from src/antlion/controllers/ap_lib/hostapd.py
rename to packages/antlion/controllers/ap_lib/hostapd.py
index b3f780d..87a0bb2 100644
--- a/src/antlion/controllers/ap_lib/hostapd.py
+++ b/packages/antlion/controllers/ap_lib/hostapd.py
@@ -17,16 +17,19 @@ import logging import re import time - -from typing import Any, Dict, Optional, Set +from datetime import datetime, timezone +from subprocess import CalledProcessError +from typing import Any, Iterable from antlion.controllers.ap_lib import hostapd_constants from antlion.controllers.ap_lib.extended_capabilities import ExtendedCapabilities +from antlion.controllers.ap_lib.hostapd_config import HostapdConfig from antlion.controllers.ap_lib.wireless_network_management import ( BssTransitionManagementRequest, ) from antlion.controllers.utils_lib.commands import shell -from antlion.libs.proc.job import Result +from antlion.logger import LogLevel +from antlion.runner import Runner PROGRAM_FILE = "/usr/sbin/hostapd" CLI_PROGRAM_FILE = "/usr/bin/hostapd_cli" @@ -43,7 +46,9 @@ config: The hostapd configuration that is being used. """ - def __init__(self, runner: Any, interface: str, working_dir: str = "/tmp") -> None: + def __init__( + self, runner: Runner, interface: str, working_dir: str = "/tmp" + ) -> None: """ Args: runner: Object that has run_async and run methods for executing @@ -54,18 +59,18 @@ self._runner = runner self._interface = interface self._working_dir = working_dir - self.config = None - self._shell = shell.ShellCommand(runner, working_dir) - self._log_file = f"hostapd-{self._interface}.log" - self._ctrl_file = f"hostapd-{self._interface}.ctrl" - self._config_file = f"hostapd-{self._interface}.conf" + self.config: HostapdConfig | None = None + self._shell = shell.ShellCommand(runner) + self._log_file = f"{working_dir}/hostapd-{self._interface}.log" + self._ctrl_file = f"{working_dir}/hostapd-{self._interface}.ctrl" + self._config_file = f"{working_dir}/hostapd-{self._interface}.conf" self._identifier = f"{PROGRAM_FILE}.*{self._config_file}" def start( self, - config: Any, + config: HostapdConfig, timeout: int = 60, - additional_parameters: Optional[Dict[str, Any]] = None, + additional_parameters: dict[str, Any] | None = None, ) -> None: """Starts hostapd @@ -86,15 +91,17 @@ it's impossible to wait on. If you need to check if configs are ok then periodic checks to is_running and logs should be used. """ - if self.is_alive(): - self.stop() + if additional_parameters is None: + additional_parameters = {} + + self.stop() self.config = config self._shell.delete_file(self._ctrl_file) self._shell.delete_file(self._log_file) self._shell.delete_file(self._config_file) - self._write_configs(additional_parameters=additional_parameters) + self._write_configs(additional_parameters) hostapd_command = f'{PROGRAM_FILE} -dd -t "{self._config_file}"' base_command = f'cd "{self._working_dir}"; {hostapd_command}' @@ -135,7 +142,7 @@ """ status_cmd = "status" result = self._run_hostapd_cli_cmd(status_cmd) - match = re.search(r"^channel=(\d+)$", result.stdout, re.MULTILINE) + match = re.search(r"^channel=(\d+)$", result, re.MULTILINE) if not match: raise Error("Current channel could not be determined") try: @@ -144,36 +151,26 @@ raise Error("Internal error: current channel could not be parsed") return channel - def _list_sta(self) -> Result: - """List all associated STA MAC addresses. - - Returns: - acts.libs.proc.job.Result containing the results of the command. 
- Raises: See _run_hostapd_cli_cmd - """ - list_sta_cmd = "list_sta" - return self._run_hostapd_cli_cmd(list_sta_cmd) - - def get_stas(self) -> Set[str]: + def get_stas(self) -> set[str]: """Return MAC addresses of all associated STAs.""" - list_sta_result = self._list_sta() + list_sta_result = self._run_hostapd_cli_cmd("list_sta") stas = set() - for line in list_sta_result.stdout.splitlines(): + for line in list_sta_result.splitlines(): # Each line must be a valid MAC address. Capture it. m = re.match(r"((?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2})", line) if m: stas.add(m.group(1)) return stas - def _sta(self, sta_mac: str) -> Result: + def _sta(self, sta_mac: str) -> str: """Return hostapd's detailed info about an associated STA. Returns: - acts.libs.proc.job.Result containing the results of the command. + Results of the command. + Raises: See _run_hostapd_cli_cmd """ - sta_cmd = "sta {}".format(sta_mac) - return self._run_hostapd_cli_cmd(sta_cmd) + return self._run_hostapd_cli_cmd(f"sta {sta_mac}") def get_sta_extended_capabilities(self, sta_mac: str) -> ExtendedCapabilities: """Get extended capabilities for the given STA, as seen by the AP. @@ -189,7 +186,7 @@ # hostapd ext_capab field is a hex encoded string representation of the # 802.11 extended capabilities structure, each byte represented by two # chars (each byte having format %02x). - m = re.search(r"ext_capab=([0-9A-Faf]+)", sta_result.stdout, re.MULTILINE) + m = re.search(r"ext_capab=([0-9A-Faf]+)", sta_result, re.MULTILINE) if not m: raise Error("Failed to get ext_capab from STA details") raw_ext_capab = m.group(1) @@ -198,9 +195,51 @@ except ValueError: raise Error(f"ext_capab contains invalid hex string repr {raw_ext_capab}") + def sta_authenticated(self, sta_mac: str) -> bool: + """Is the given STA authenticated? + + Args: + sta_mac: MAC address of the STA in question. + Returns: + True if AP sees that the STA is authenticated, False otherwise. + Raises: + Error if authenticated status for the STA cannot be obtained. + """ + sta_result = self._sta(sta_mac) + m = re.search(r"flags=.*\[AUTH\]", sta_result, re.MULTILINE) + return bool(m) + + def sta_associated(self, sta_mac: str) -> bool: + """Is the given STA associated? + + Args: + sta_mac: MAC address of the STA in question. + Returns: + True if AP sees that the STA is associated, False otherwise. + Raises: + Error if associated status for the STA cannot be obtained. + """ + sta_result = self._sta(sta_mac) + m = re.search(r"flags=.*\[ASSOC\]", sta_result, re.MULTILINE) + return bool(m) + + def sta_authorized(self, sta_mac: str) -> bool: + """Is the given STA authorized (802.1X controlled port open)? + + Args: + sta_mac: MAC address of the STA in question. + Returns: + True if AP sees that the STA is 802.1X authorized, False otherwise. + Raises: + Error if authorized status for the STA cannot be obtained. + """ + sta_result = self._sta(sta_mac) + m = re.search(r"flags=.*\[AUTHORIZED\]", sta_result, re.MULTILINE) + return bool(m) + def _bss_tm_req( self, client_mac: str, request: BssTransitionManagementRequest - ) -> Result: + ) -> None: """Send a hostapd BSS Transition Management request command to a STA. Args: @@ -228,21 +267,22 @@ bss_tm_req_cmd += f" valid_int={request.validity_interval}" # neighbor= can appear multiple times, so it requires special handling. 
- for neighbor in request.candidate_list: - bssid = neighbor.bssid - bssid_info = hex(neighbor.bssid_information) - op_class = neighbor.operating_class - chan_num = neighbor.channel_number - phy_type = int(neighbor.phy_type) - bss_tm_req_cmd += ( - f" neighbor={bssid},{bssid_info},{op_class},{chan_num},{phy_type}" - ) + if request.candidate_list is not None: + for neighbor in request.candidate_list: + bssid = neighbor.bssid + bssid_info = hex(neighbor.bssid_information) + op_class = neighbor.operating_class + chan_num = neighbor.channel_number + phy_type = int(neighbor.phy_type) + bss_tm_req_cmd += ( + f" neighbor={bssid},{bssid_info},{op_class},{chan_num},{phy_type}" + ) - return self._run_hostapd_cli_cmd(bss_tm_req_cmd) + self._run_hostapd_cli_cmd(bss_tm_req_cmd) def send_bss_transition_management_req( self, sta_mac: str, request: BssTransitionManagementRequest - ) -> Result: + ) -> None: """Send a BSS Transition Management request to an associated STA. Args: @@ -252,7 +292,7 @@ acts.libs.proc.job.Result containing the results of the command. Raises: See _run_hostapd_cli_cmd """ - return self._bss_tm_req(sta_mac, request) + self._bss_tm_req(sta_mac, request) def is_alive(self) -> bool: """ @@ -268,29 +308,50 @@ A string of the hostapd logs. """ # TODO: Auto pulling of logs when stop is called. - return self._shell.read_file(self._log_file) + with LogLevel(self._runner.log, logging.INFO): + log = self._shell.read_file(self._log_file) - def _run_hostapd_cli_cmd(self, cmd: str) -> Result: + # Convert epoch to human-readable times + result: list[str] = [] + for line in log.splitlines(): + try: + end = line.index(":") + epoch = float(line[:end]) + timestamp = datetime.fromtimestamp(epoch, timezone.utc).strftime( + "%m-%d %H:%M:%S.%f" + ) + result.append(f"{timestamp} {line[end+1:]}") + except ValueError: # Colon not found or float conversion failure + result.append(line) + + return "\n".join(result) + + def _run_hostapd_cli_cmd(self, cmd: str) -> str: """Run the given hostapd_cli command. Runs the command, waits for the output (up to default timeout), and returns the result. Returns: - acts.libs.proc.job.Result containing the results of the ssh command. + Results of the ssh command. Raises: - acts.lib.proc.job.TimeoutError: When the remote command took too + subprocess.TimeoutExpired: When the remote command took too long to execute. antlion.controllers.utils_lib.ssh.connection.Error: When the ssh connection failed to be created. - antlion.controllers.utils_lib.ssh.connection.CommandError: Ssh worked, - but the command had an error executing. + subprocess.CalledProcessError: Ssh worked, but the command had an + error executing. """ hostapd_cli_job = ( f"cd {self._working_dir}; " f"{CLI_PROGRAM_FILE} -p {self._ctrl_file} {cmd}" ) - return self._runner.run(hostapd_cli_job) + proc = self._runner.run(hostapd_cli_job) + if proc.returncode: + raise CalledProcessError( + proc.returncode, hostapd_cli_job, proc.stdout, proc.stderr + ) + return proc.stdout.decode("utf-8") def _wait_for_process(self, timeout: int = 60) -> None: """Waits for the process to come up. 
@@ -353,18 +414,16 @@ if should_be_up and is_dead: raise Error("Hostapd failed to start", self) - def _write_configs( - self, additional_parameters: Optional[Dict[str, Any]] = None - ) -> None: + def _write_configs(self, additional_parameters: dict[str, Any]) -> None: """Writes the configs to the hostapd config file.""" self._shell.delete_file(self._config_file) interface_configs = collections.OrderedDict() interface_configs["interface"] = self._interface interface_configs["ctrl_interface"] = self._ctrl_file - pairs = (f"{k}={v}" for k, v in interface_configs.items()) + pairs: Iterable[str] = (f"{k}={v}" for k, v in interface_configs.items()) - packaged_configs = self.config.package_configs() + packaged_configs = self.config.package_configs() if self.config else [] if additional_parameters: packaged_configs.append(additional_parameters) for packaged_config in packaged_configs:
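Note: _run_hostapd_cli_cmd now returns the command's stdout as a plain string (raising subprocess.CalledProcessError on a non-zero exit) instead of a job.Result, and the new sta_authenticated/sta_associated/sta_authorized helpers parse the flags= line of "hostapd_cli sta <mac>". A minimal usage sketch under those assumptions; the runner, interface name, and MAC address below are placeholders, not values from this change:

    import time

    from antlion.controllers.ap_lib.hostapd import Hostapd

    def wait_until_authorized(ap: Hostapd, sta_mac: str, timeout_sec: float = 30.0) -> None:
        """Poll hostapd until the STA is authenticated, associated, and authorized."""
        deadline = time.time() + timeout_sec
        while time.time() < deadline:
            # Each helper runs "hostapd_cli sta <mac>" and checks the flags= line.
            if (
                ap.sta_authenticated(sta_mac)
                and ap.sta_associated(sta_mac)
                and ap.sta_authorized(sta_mac)
            ):
                return
            time.sleep(1)
        raise TimeoutError(f"{sta_mac} did not reach the authorized state")

    # Placeholder usage:
    #   ap = Hostapd(runner, "wlan1")
    #   wait_until_authorized(ap, "aa:bb:cc:dd:ee:ff")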
diff --git a/src/antlion/controllers/ap_lib/hostapd_ap_preset.py b/packages/antlion/controllers/ap_lib/hostapd_ap_preset.py similarity index 91% rename from src/antlion/controllers/ap_lib/hostapd_ap_preset.py rename to packages/antlion/controllers/ap_lib/hostapd_ap_preset.py index 3b694c0..6a11120 100644 --- a/src/antlion/controllers/ap_lib/hostapd_ap_preset.py +++ b/packages/antlion/controllers/ap_lib/hostapd_ap_preset.py
@@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, FrozenSet, List, Optional +from __future__ import annotations -from antlion import utils +from typing import Any, FrozenSet, TypeVar + from antlion.controllers.ap_lib import hostapd_config, hostapd_constants, hostapd_utils from antlion.controllers.ap_lib.hostapd_security import Security from antlion.controllers.ap_lib.third_party_ap_profiles import ( @@ -27,8 +28,10 @@ tplink, ) +T = TypeVar("T") -def _get_or_default(var: Optional[Any], default_value: Any) -> Any: + +def _get_or_default(var: T | None, default_value: T) -> T: """Check variable and return non-null value. Args: @@ -42,28 +45,28 @@ def create_ap_preset( + iface_wlan_2g: str, + iface_wlan_5g: str, profile_name: str = "whirlwind", - iface_wlan_2g: Optional[str] = None, - iface_wlan_5g: Optional[str] = None, - channel: Optional[int] = None, - mode: Optional[str] = None, - frequency: Optional[int] = None, - security: Optional[Security] = None, - pmf_support: Optional[int] = None, - ssid: Optional[str] = None, - hidden: Optional[bool] = None, - dtim_period: Optional[int] = None, - frag_threshold: Optional[int] = None, - rts_threshold: Optional[int] = None, - force_wmm: Optional[bool] = None, - beacon_interval: Optional[int] = None, - short_preamble: Optional[bool] = None, - n_capabilities: Optional[List[Any]] = None, - ac_capabilities: Optional[List[Any]] = None, - vht_bandwidth: Optional[int] = None, + channel: int | None = None, + mode: str | None = None, + frequency: int | None = None, + security: Security | None = None, + pmf_support: int | None = None, + ssid: str | None = None, + hidden: bool | None = None, + dtim_period: int | None = None, + frag_threshold: int | None = None, + rts_threshold: int | None = None, + force_wmm: bool | None = None, + beacon_interval: int | None = None, + short_preamble: bool | None = None, + n_capabilities: list[Any] | None = None, + ac_capabilities: list[Any] | None = None, + vht_bandwidth: int | None = None, wnm_features: FrozenSet[hostapd_constants.WnmFeature] = frozenset(), - bss_settings: List[Any] = [], -): + bss_settings: list[Any] = [], +) -> hostapd_config.HostapdConfig: """AP preset config generator. This a wrapper for hostapd_config but but supplies the default settings for the preset that is selected. @@ -74,12 +77,12 @@ Args: profile_name: The name of the device want the preset for. Options: whirlwind - channel: int, channel number. - dtim: int, DTIM value of the AP, default is 2. - frequency: int, frequency of channel. + channel: Channel number. + dtim: DTIM value of the AP, default is 2. + frequency: Frequency of channel. security: The security settings to use. - ssid: string, The name of the ssid to broadcast. - pmf_support: int, whether pmf is disabled, enabled, or required + ssid: The name of the ssid to broadcast. + pmf_support: Whether pmf is disabled, enabled, or required vht_bandwidth: VHT bandwidth for 11ac operation. bss_settings: The settings for all bss. iface_wlan_2g: the wlan 2g interface name of the AP. @@ -97,17 +100,20 @@ Returns: A hostapd_config object that can be used by the hostapd object. 
""" + if security is None: + security = Security() # Verify interfaces hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - if channel: + if channel is not None: frequency = hostapd_config.get_frequency_for_channel(channel) - elif frequency: + elif frequency is not None: channel = hostapd_config.get_channel_for_frequency(frequency) - else: - raise ValueError("Specify either frequency or channel.") + + if channel is None or frequency is None: + raise ValueError("Must specify channel or frequency") if profile_name == "whirlwind": # profile indicates phy mode is 11bgn for 2.4Ghz or 11acn for 5Ghz @@ -164,7 +170,7 @@ extended_channel = hostapd_constants.N_CAPABILITY_HT20 # Define the n capability vector for 20 MHz and higher bandwidth if not vht_bandwidth: - pass + n_capabilities = _get_or_default(n_capabilities, []) elif vht_bandwidth >= 40: n_capabilities = _get_or_default( n_capabilities, @@ -217,6 +223,7 @@ frequency=frequency, frag_threshold=frag_threshold, rts_threshold=rts_threshold, + wnm_features=wnm_features, n_capabilities=n_capabilities, ac_capabilities=ac_capabilities, bss_settings=bss_settings, @@ -288,6 +295,12 @@ "vendor_elements": "dd0cf4f5e80505ff0000ffffffff" "070a75732024041e95051e00" } default_configs = {"bridge": "br-lan", "iapp_interface": "br-lan"} + additional_params = ( + vendor_elements + | default_configs + | hostapd_constants.ENABLE_RRM_BEACON_REPORT + | hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT + ) if frequency < 5000: interface = iface_wlan_2g @@ -304,12 +317,6 @@ ], ) - additional_params = utils.merge_dicts( - vendor_elements, - hostapd_constants.ENABLE_RRM_BEACON_REPORT, - hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT, - default_configs, - ) config = hostapd_config.HostapdConfig( ssid=ssid, hidden=hidden, @@ -385,12 +392,6 @@ ], ) - additional_params = utils.merge_dicts( - vendor_elements, - hostapd_constants.ENABLE_RRM_BEACON_REPORT, - hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT, - default_configs, - ) config = hostapd_config.HostapdConfig( ssid=ssid, hidden=hidden,
diff --git a/src/antlion/controllers/ap_lib/hostapd_bss_settings.py b/packages/antlion/controllers/ap_lib/hostapd_bss_settings.py similarity index 62% rename from src/antlion/controllers/ap_lib/hostapd_bss_settings.py rename to packages/antlion/controllers/ap_lib/hostapd_bss_settings.py index 56a5422..2f4d261 100644 --- a/src/antlion/controllers/ap_lib/hostapd_bss_settings.py +++ b/packages/antlion/controllers/ap_lib/hostapd_bss_settings.py
@@ -14,6 +14,8 @@ import collections +from antlion.controllers.ap_lib.hostapd_security import Security + class BssSettings(object): """Settings for a bss. @@ -21,22 +23,30 @@ Settings for a bss to allow multiple network on a single device. Attributes: - name: string, The name that this bss will go by. - ssid: string, The name of the ssid to brodcast. - hidden: bool, If true then the ssid will be hidden. - security: Security, The security settings to use. + name: The name that this bss will go by. + ssid: The name of the ssid to broadcast. + hidden: If true then the ssid will be hidden. + security: The security settings to use. + bssid: The bssid to use. """ - def __init__(self, name, ssid, hidden=False, security=None, bssid=None): + def __init__( + self, + name: str, + ssid: str, + security: Security, + hidden: bool = False, + bssid: str | None = None, + ): self.name = name self.ssid = ssid - self.hidden = hidden self.security = security + self.hidden = hidden self.bssid = bssid - def generate_dict(self): + def generate_dict(self) -> dict[str, str | int]: """Returns: A dictionary of bss settings.""" - settings = collections.OrderedDict() + settings: dict[str, str | int] = collections.OrderedDict() settings["bss"] = self.name if self.bssid: settings["bssid"] = self.bssid @@ -44,9 +54,8 @@ settings["ssid"] = self.ssid settings["ignore_broadcast_ssid"] = 1 if self.hidden else 0 - if self.security: - security_settings = self.security.generate_dict() - for k, v in security_settings.items(): - settings[k] = v + security_settings = self.security.generate_dict() + for k, v in security_settings.items(): + settings[k] = v return settings
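Note: BssSettings now requires a Security object (pass Security() explicitly for an open BSS) and the security parameter moved ahead of hidden. A small sketch; the BSS name, SSID, and passphrase are placeholders:

    from antlion.controllers.ap_lib.hostapd_bss_settings import BssSettings
    from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode

    guest_bss = BssSettings(
        name="wlan1_guest",  # placeholder BSS name
        ssid="guest_network",
        security=Security(security_mode=SecurityMode.WPA2, password="guestpass1"),
    )
    # Yields the bss/ssid entries plus the WPA2 options hostapd expects.
    print(guest_bss.generate_dict())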
diff --git a/src/antlion/controllers/ap_lib/hostapd_config.py b/packages/antlion/controllers/ap_lib/hostapd_config.py similarity index 78% rename from src/antlion/controllers/ap_lib/hostapd_config.py rename to packages/antlion/controllers/ap_lib/hostapd_config.py index a886e04..749e585 100644 --- a/src/antlion/controllers/ap_lib/hostapd_config.py +++ b/packages/antlion/controllers/ap_lib/hostapd_config.py
@@ -14,12 +14,14 @@ import collections import logging -from typing import FrozenSet +from typing import Any, FrozenSet from antlion.controllers.ap_lib import hostapd_constants +from antlion.controllers.ap_lib.hostapd_bss_settings import BssSettings +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode -def ht40_plus_allowed(channel): +def ht40_plus_allowed(channel: int): """Returns: True iff HT40+ is enabled for this configuration.""" channel_supported = ( channel @@ -30,7 +32,7 @@ return channel_supported -def ht40_minus_allowed(channel): +def ht40_minus_allowed(channel: int): """Returns: True iff HT40- is enabled for this configuration.""" channel_supported = ( channel @@ -41,11 +43,11 @@ return channel_supported -def get_frequency_for_channel(channel): +def get_frequency_for_channel(channel: int): """The frequency associated with a given channel number. Args: - value: int channel number. + value: channel number. Returns: int, frequency in MHz associated with the channel. @@ -55,14 +57,14 @@ if channel == channel_iter: return frequency else: - raise ValueError("Unknown channel value: %r." % channel) + raise ValueError(f"Unknown channel value: {channel!r}.") -def get_channel_for_frequency(frequency): +def get_channel_for_frequency(frequency: int): """The channel number associated with a given frequency. Args: - value: int frequency in MHz. + value: frequency in MHz. Returns: int, frequency associated with the channel. @@ -77,7 +79,203 @@ All the settings for a router that are not part of an ssid. """ - def _get_11ac_center_channel_from_channel(self, channel): + def __init__( + self, + interface: str | None = None, + mode: str | None = None, + channel: int | None = None, + frequency: int | None = None, + n_capabilities: list[Any] | None = None, + beacon_interval: int | None = None, + dtim_period: int | None = None, + frag_threshold: int | None = None, + rts_threshold: int | None = None, + short_preamble: bool | None = None, + ssid: str | None = None, + hidden: bool = False, + security: Security | None = None, + bssid: str | None = None, + force_wmm: bool | None = None, + pmf_support: int | None = None, + obss_interval: int | None = None, + vht_channel_width: Any | None = None, + vht_center_channel: int | None = None, + ac_capabilities: list[Any] | None = None, + beacon_footer: str = "", + spectrum_mgmt_required: bool | None = None, + scenario_name: str | None = None, + min_streams: int | None = None, + wnm_features: FrozenSet[hostapd_constants.WnmFeature] = frozenset(), + bss_settings: list[Any] | None = None, + additional_parameters: dict[str, Any] | None = None, + set_ap_defaults_profile: str = "whirlwind", + ) -> None: + """Construct a HostapdConfig. + + You may specify channel or frequency, but not both. Both options + are checked for validity (i.e. you can't specify an invalid channel + or a frequency that will not be accepted). + + Args: + interface: The name of the interface to use. + mode: MODE_11x defined above. + channel: Channel number. + frequency: Frequency of channel. + n_capabilities: List of N_CAPABILITY_x defined above. + beacon_interval: Beacon interval of AP. + dtim_period: Include a DTIM every |dtim_period| beacons. + frag_threshold: Maximum outgoing data frame size. + rts_threshold: Maximum packet size without requiring explicit + protection via rts/cts or cts to self. + short_preamble: Whether to use a short preamble. + ssid: string, The name of the ssid to broadcast. + hidden: Should the ssid be hidden. 
+ security: The security settings to use. + bssid: A MAC address like string for the BSSID. + force_wmm: True if we should force WMM on, False if we should + force it off, None if we shouldn't force anything. + pmf_support: One of PMF_SUPPORT_* above. Controls whether the + client supports/must support 802.11w. If None, defaults to + required with wpa3, else defaults to disabled. + obss_interval: Interval in seconds that client should be + required to do background scans for overlapping BSSes. + vht_channel_width: Object channel width + vht_center_channel: Center channel of segment 0. + ac_capabilities: List of AC_CAPABILITY_x defined above. + beacon_footer: Containing (not validated) IE data to be + placed at the end of the beacon. + spectrum_mgmt_required: True if we require the DUT to support + spectrum management. + scenario_name: To be included in file names, instead + of the interface name. + min_streams: Number of spatial streams required. + wnm_features: WNM features to enable on the AP. + control_interface: The file name to use as the control interface. + bss_settings: The settings for all bss. + additional_parameters: A dictionary of additional parameters to add + to the hostapd config. + set_ap_defaults_profile: profile name to load defaults from + """ + if n_capabilities is None: + n_capabilities = [] + if ac_capabilities is None: + ac_capabilities = [] + if bss_settings is None: + bss_settings = [] + if additional_parameters is None: + additional_parameters = {} + if security is None: + security = Security() + + self.set_ap_defaults_profile = set_ap_defaults_profile + self._interface = interface + if channel is not None and frequency is not None: + raise ValueError("Specify either frequency or channel " "but not both.") + + unknown_caps = [ + cap + for cap in n_capabilities + if cap not in hostapd_constants.N_CAPABILITIES_MAPPING + ] + if unknown_caps: + raise ValueError(f"Unknown capabilities: {unknown_caps!r}") + + if channel: + self.channel = channel + elif frequency: + self.frequency = frequency + else: + raise ValueError("Specify either frequency or channel.") + + self._n_capabilities = set(n_capabilities) + if force_wmm is not None: + self._wmm_enabled = force_wmm + elif self._n_capabilities: + self._wmm_enabled = True + if self._n_capabilities and mode is None: + mode = hostapd_constants.MODE_11N_PURE + self._mode = mode + + if not self.supports_frequency(self.frequency): + raise ValueError( + "Configured a mode %s that does not support " + "frequency %d" % (self._mode, self.frequency) + ) + + self._beacon_interval = beacon_interval + self._dtim_period = dtim_period + self._frag_threshold = frag_threshold + self._rts_threshold = rts_threshold + self._short_preamble = short_preamble + self._ssid = ssid + self._hidden = hidden + self._security = security + self._bssid = bssid + # Default PMF Values + if pmf_support is None: + if self.security and self.security.security_mode is SecurityMode.WPA3: + # Set PMF required for WP3 + self._pmf_support = hostapd_constants.PMF_SUPPORT_REQUIRED + elif self.security and self.security.security_mode.is_wpa3(): + # Default PMF to enabled for WPA3 mixed modes (can be + # overwritten by explicitly provided value) + self._pmf_support = hostapd_constants.PMF_SUPPORT_ENABLED + else: + # Default PMD to disabled for all other modes (can be + # overwritten by explicitly provided value) + self._pmf_support = hostapd_constants.PMF_SUPPORT_DISABLED + elif pmf_support not in hostapd_constants.PMF_SUPPORT_VALUES: + raise ValueError(f"Invalid value 
for pmf_support: {pmf_support!r}") + elif ( + pmf_support != hostapd_constants.PMF_SUPPORT_REQUIRED + and self.security + and self.security.security_mode is SecurityMode.WPA3 + ): + raise ValueError("PMF support must be required with wpa3.") + else: + self._pmf_support = pmf_support + self._obss_interval = obss_interval + if self.is_11ac: + if str(vht_channel_width) == "40" or str(vht_channel_width) == "20": + self._vht_oper_chwidth = hostapd_constants.VHT_CHANNEL_WIDTH_40 + elif str(vht_channel_width) == "80": + self._vht_oper_chwidth = hostapd_constants.VHT_CHANNEL_WIDTH_80 + elif str(vht_channel_width) == "160": + self._vht_oper_chwidth = hostapd_constants.VHT_CHANNEL_WIDTH_160 + elif str(vht_channel_width) == "80+80": + self._vht_oper_chwidth = hostapd_constants.VHT_CHANNEL_WIDTH_80_80 + elif vht_channel_width is not None: + raise ValueError("Invalid channel width") + else: + logging.warning( + "No channel bandwidth specified. Using 80MHz for 11ac." + ) + self._vht_oper_chwidth = 1 + if vht_center_channel is not None: + self._vht_oper_centr_freq_seg0_idx = vht_center_channel + elif vht_channel_width == 20 and channel is not None: + self._vht_oper_centr_freq_seg0_idx = channel + else: + self._vht_oper_centr_freq_seg0_idx = ( + self._get_11ac_center_channel_from_channel(self.channel) + ) + self._ac_capabilities = set(ac_capabilities) + self._beacon_footer = beacon_footer + self._spectrum_mgmt_required = spectrum_mgmt_required + self._scenario_name = scenario_name + self._min_streams = min_streams + self._wnm_features = wnm_features + self._additional_parameters = additional_parameters + + self._bss_lookup: dict[str, BssSettings] = collections.OrderedDict() + for bss in bss_settings: + if bss.name in self._bss_lookup: + raise ValueError( + "Cannot have multiple bss settings with the same name." + ) + self._bss_lookup[bss.name] = bss + + def _get_11ac_center_channel_from_channel(self, channel: int) -> int: """Returns the center channel of the selected channel band based on the channel and channel bandwidth provided. """ @@ -92,11 +290,7 @@ lower_channel_bound, upper_channel_bound = channel_map if lower_channel_bound <= channel <= upper_channel_bound: return lower_channel_bound + center_channel_delta - raise ValueError( - "Invalid channel for {channel_width}.".format( - channel_width=self._vht_oper_chwidth - ) - ) + raise ValueError(f"Invalid channel for {self._vht_oper_chwidth}.") @property def _get_default_config(self): @@ -200,20 +394,20 @@ self.frequency = get_frequency_for_channel(value) @property - def bssid(self): + def bssid(self) -> str | None: return self._bssid @bssid.setter - def bssid(self, value): + def bssid(self, value: str): self._bssid = value @property - def frequency(self): - """Returns: int, frequency for hostapd to listen on.""" + def frequency(self) -> int: + """Returns: frequency for hostapd to listen on.""" return self._frequency @frequency.setter - def frequency(self, value): + def frequency(self, value: int): """Sets the frequency for hostapd to listen on. Args: @@ -221,21 +415,21 @@ """ if value not in hostapd_constants.CHANNEL_MAP: - raise ValueError("Tried to set an invalid frequency: %r." 
% value) + raise ValueError(f"Tried to set an invalid frequency: {value!r}.") self._frequency = value @property - def bss_lookup(self): + def bss_lookup(self) -> dict[str, BssSettings]: return self._bss_lookup @property - def ssid(self): + def ssid(self) -> str | None: """Returns: SsidSettings, The root Ssid settings being used.""" return self._ssid @ssid.setter - def ssid(self, value): + def ssid(self, value: str): """Sets the ssid for the hostapd. Args: @@ -250,30 +444,30 @@ return self._hidden @hidden.setter - def hidden(self, value): + def hidden(self, value: bool): """Sets if this ssid is hidden. Args: - value: bool, If true the ssid will be hidden. + value: If true the ssid will be hidden. """ self.hidden = value @property - def security(self): + def security(self) -> Security: """Returns: The security type being used.""" return self._security @security.setter - def security(self, value): + def security(self, value: Security): """Sets the security options to use. Args: - value: Security, The type of security to use. + value: The type of security to use. """ self._security = value @property - def ht_packet_capture_mode(self): + def ht_packet_capture_mode(self) -> str | None: """Get an appropriate packet capture HT parameter. When we go to configure a raw monitor we need to configure @@ -299,26 +493,24 @@ return "HT20" @property - def beacon_footer(self): - """Returns: bool _beacon_footer value.""" + def beacon_footer(self) -> str: return self._beacon_footer - def beacon_footer(self, value): + @beacon_footer.setter + def beacon_footer(self, value: str): """Changes the beacon footer. Args: - value: bool, The beacon footer vlaue. + value: The beacon footer value. """ self._beacon_footer = value @property - def scenario_name(self): - """Returns: string _scenario_name value, or None.""" + def scenario_name(self) -> str | None: return self._scenario_name @property - def min_streams(self): - """Returns: int, _min_streams value, or None.""" + def min_streams(self) -> int | None: return self._min_streams @property @@ -329,219 +521,7 @@ def wnm_features(self, value: FrozenSet[hostapd_constants.WnmFeature]): self._wnm_features = value - def __init__( - self, - interface=None, - mode=None, - channel=None, - frequency=None, - n_capabilities=[], - beacon_interval=None, - dtim_period=None, - frag_threshold=None, - rts_threshold=None, - short_preamble=None, - ssid=None, - hidden=False, - security=None, - bssid=None, - force_wmm=None, - pmf_support=None, - obss_interval=None, - vht_channel_width=None, - vht_center_channel=None, - ac_capabilities=[], - beacon_footer="", - spectrum_mgmt_required=None, - scenario_name=None, - min_streams=None, - wnm_features: FrozenSet[hostapd_constants.WnmFeature] = frozenset(), - bss_settings=[], - additional_parameters={}, - set_ap_defaults_profile="whirlwind", - ): - """Construct a HostapdConfig. - - You may specify channel or frequency, but not both. Both options - are checked for validity (i.e. you can't specify an invalid channel - or a frequency that will not be accepted). - - Args: - interface: string, The name of the interface to use. - mode: string, MODE_11x defined above. - channel: int, channel number. - frequency: int, frequency of channel. - n_capabilities: list of N_CAPABILITY_x defined above. - beacon_interval: int, beacon interval of AP. - dtim_period: int, include a DTIM every |dtim_period| beacons. - frag_threshold: int, maximum outgoing data frame size. 
- rts_threshold: int, maximum packet size without requiring explicit - protection via rts/cts or cts to self. - short_preamble: Whether to use a short preamble. - ssid: string, The name of the ssid to brodcast. - hidden: bool, Should the ssid be hidden. - security: Security, the secuirty settings to use. - bssid: string, a MAC address like string for the BSSID. - force_wmm: True if we should force WMM on, False if we should - force it off, None if we shouldn't force anything. - pmf_support: one of PMF_SUPPORT_* above. Controls whether the - client supports/must support 802.11w. If None, defaults to - required with wpa3, else defaults to disabled. - obss_interval: int, interval in seconds that client should be - required to do background scans for overlapping BSSes. - vht_channel_width: object channel width - vht_center_channel: int, center channel of segment 0. - ac_capabilities: list of AC_CAPABILITY_x defined above. - beacon_footer: string, containing (unvalidated) IE data to be - placed at the end of the beacon. - spectrum_mgmt_required: True if we require the DUT to support - spectrum management. - scenario_name: string to be included in file names, instead - of the interface name. - min_streams: int, number of spatial streams required. - wnm_features: WNM features to enable on the AP. - control_interface: The file name to use as the control interface. - bss_settings: The settings for all bss. - additional_parameters: A dictionary of additional parameters to add - to the hostapd config. - set_ap_defaults_profile: profile name to load defaults from - """ - self.set_ap_defaults_profile = set_ap_defaults_profile - self._interface = interface - if channel is not None and frequency is not None: - raise ValueError("Specify either frequency or channel " "but not both.") - - self._wmm_enabled = False - unknown_caps = [ - cap - for cap in n_capabilities - if cap not in hostapd_constants.N_CAPABILITIES_MAPPING - ] - if unknown_caps: - raise ValueError("Unknown capabilities: %r" % unknown_caps) - - self._frequency = None - if channel: - self.channel = channel - elif frequency: - self.frequency = frequency - else: - raise ValueError("Specify either frequency or channel.") - """ - if set_ap_defaults_model: - ap_default_config = hostapd_ap_default_configs.APDefaultConfig( - profile_name=set_ap_defaults_model, frequency=self.frequency) - force_wmm = ap_default_config.force_wmm - beacon_interval = ap_default_config.beacon_interval - dtim_period = ap_default_config.dtim_period - short_preamble = ap_default_config.short_preamble - self._interface = ap_default_config.interface - mode = ap_default_config.mode - if ap_default_config.n_capabilities: - n_capabilities = ap_default_config.n_capabilities - if ap_default_config.ac_capabilities: - ap_default_config = ap_default_config.ac_capabilities - """ - - self._n_capabilities = set(n_capabilities) - if self._n_capabilities: - self._wmm_enabled = True - if self._n_capabilities and mode is None: - mode = hostapd_constants.MODE_11N_PURE - self._mode = mode - - if not self.supports_frequency(self.frequency): - raise ValueError( - "Configured a mode %s that does not support " - "frequency %d" % (self._mode, self.frequency) - ) - - self._beacon_interval = beacon_interval - self._dtim_period = dtim_period - self._frag_threshold = frag_threshold - self._rts_threshold = rts_threshold - self._short_preamble = short_preamble - self._ssid = ssid - self._hidden = hidden - self._security = security - self._bssid = bssid - if force_wmm is not None: - if force_wmm: - 
self._wmm_enabled = 1 - else: - self._wmm_enabled = 0 - # Default PMF Values - if pmf_support is None: - if ( - self.security - and self.security.security_mode_string == hostapd_constants.WPA3_STRING - ): - # Set PMF required for WP3 - self._pmf_support = hostapd_constants.PMF_SUPPORT_REQUIRED - elif ( - self.security - and self.security.security_mode_string - in hostapd_constants.WPA3_MODE_STRINGS - ): - # Default PMF to enabled for WPA3 mixed modes (can be - # overwritten by explicitly provided value) - self._pmf_support = hostapd_constants.PMF_SUPPORT_ENABLED - else: - # Default PMD to disabled for all other modes (can be - # overwritten by explicitly provided value) - self._pmf_support = hostapd_constants.PMF_SUPPORT_DISABLED - elif pmf_support not in hostapd_constants.PMF_SUPPORT_VALUES: - raise ValueError("Invalid value for pmf_support: %r" % pmf_support) - elif ( - pmf_support != hostapd_constants.PMF_SUPPORT_REQUIRED - and self.security - and self.security.security_mode_string == hostapd_constants.WPA3_STRING - ): - raise ValueError("PMF support must be required with wpa3.") - else: - self._pmf_support = pmf_support - self._obss_interval = obss_interval - if self.is_11ac: - if str(vht_channel_width) == "40" or str(vht_channel_width) == "20": - self._vht_oper_chwidth = hostapd_constants.VHT_CHANNEL_WIDTH_40 - elif str(vht_channel_width) == "80": - self._vht_oper_chwidth = hostapd_constants.VHT_CHANNEL_WIDTH_80 - elif str(vht_channel_width) == "160": - self._vht_oper_chwidth = hostapd_constants.VHT_CHANNEL_WIDTH_160 - elif str(vht_channel_width) == "80+80": - self._vht_oper_chwidth = hostapd_constants.VHT_CHANNEL_WIDTH_80_80 - elif vht_channel_width is not None: - raise ValueError("Invalid channel width") - else: - logging.warning( - "No channel bandwidth specified. Using 80MHz for 11ac." - ) - self._vht_oper_chwidth = 1 - if vht_center_channel is not None: - self._vht_oper_centr_freq_seg0_idx = vht_center_channel - elif vht_channel_width == 20: - self._vht_oper_centr_freq_seg0_idx = channel - else: - self._vht_oper_centr_freq_seg0_idx = ( - self._get_11ac_center_channel_from_channel(self.channel) - ) - self._ac_capabilities = set(ac_capabilities) - self._beacon_footer = beacon_footer - self._spectrum_mgmt_required = spectrum_mgmt_required - self._scenario_name = scenario_name - self._min_streams = min_streams - self._wnm_features = wnm_features - self._additional_parameters = additional_parameters - - self._bss_lookup = collections.OrderedDict() - for bss in bss_settings: - if bss.name in self._bss_lookup: - raise ValueError( - "Cannot have multiple bss settings with the" " same name." - ) - self._bss_lookup[bss.name] = bss - - def __repr__(self): + def __repr__(self) -> str: return ( "%s(mode=%r, channel=%r, frequency=%r, " "n_capabilities=%r, beacon_interval=%r, " @@ -565,23 +545,23 @@ ) ) - def supports_channel(self, value): + def supports_channel(self, value: int) -> bool: """Check whether channel is supported by the current hardware mode. - @param value: int channel to check. + @param value: channel to check. @return True iff the current mode supports the band of the channel. """ - for freq, channel in hostapd_constants.CHANNEL_MAP.iteritems(): + for freq, channel in hostapd_constants.CHANNEL_MAP.items(): if channel == value: return self.supports_frequency(freq) return False - def supports_frequency(self, frequency): + def supports_frequency(self, frequency: int) -> bool: """Check whether frequency is supported by the current hardware mode. 
- @param frequency: int frequency to check. + @param frequency: frequency to check. @return True iff the current mode supports the band of the frequency. """ @@ -624,7 +604,7 @@ return True - def add_bss(self, bss): + def add_bss(self, bss: BssSettings) -> None: """Adds a new bss setting. Args: @@ -635,11 +615,11 @@ self._bss_lookup[bss.name] = bss - def remove_bss(self, bss_name): + def remove_bss(self, bss_name: str) -> None: """Removes a bss setting from the config.""" del self._bss_lookup[bss_name] - def package_configs(self): + def package_configs(self) -> list[dict[str, str | int]]: """Package the configs. Returns: @@ -667,7 +647,7 @@ conf["vht_oper_centr_freq_seg0_idx"] = self._vht_oper_centr_freq_seg0_idx conf["vht_capab"] = self._hostapd_vht_capabilities if self._wmm_enabled is not None: - conf["wmm_enabled"] = self._wmm_enabled + conf["wmm_enabled"] = 1 if self._wmm_enabled else 0 if self._require_ht: conf["require_ht"] = 1 if self._require_vht: @@ -696,17 +676,8 @@ conf["local_pwr_constraint"] = 0 # No local constraint conf["spectrum_mgmt_required"] = 1 # Requires local_pwr_constraint - if self._security: - for k, v in self._security.generate_dict().items(): - conf[k] = v - - all_conf = [conf] - - for bss in self._bss_lookup.values(): - bss_conf = collections.OrderedDict() - for k, v in (bss.generate_dict()).items(): - bss_conf[k] = v - all_conf.append(bss_conf) + for k, v in self._security.generate_dict().items(): + conf[k] = v for wnm_feature in self._wnm_features: if wnm_feature == hostapd_constants.WnmFeature.TIME_ADVERTISEMENT: @@ -725,6 +696,14 @@ hostapd_constants.ENABLE_WNM_IPV6_NEIGHBOR_ADVERTISEMENT_MULTICAST_TO_UNICAST ) + all_conf = [conf] + + for bss in self._bss_lookup.values(): + bss_conf = collections.OrderedDict() + for k, v in (bss.generate_dict()).items(): + bss_conf[k] = v + all_conf.append(bss_conf) + if self._additional_parameters: all_conf.append(self._additional_parameters)
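Note: the HostapdConfig constructor now sits at the top of the class with typed parameters, security falls back to Security() (open), and wmm_enabled is normalized to 0/1 when the configs are packaged. A rough sketch of building a config with one extra BSS and inspecting the packaged sections; the parameter choices below are illustrative placeholders, not values from this change:

    from antlion.controllers.ap_lib import hostapd_constants
    from antlion.controllers.ap_lib.hostapd_bss_settings import BssSettings
    from antlion.controllers.ap_lib.hostapd_config import HostapdConfig
    from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode

    config = HostapdConfig(
        interface="wlan1",  # placeholder interface name
        mode=hostapd_constants.MODE_11N_PURE,
        channel=36,
        force_wmm=True,
        ssid="primary_network",
        security=Security(security_mode=SecurityMode.WPA2, password="password123"),
        bss_settings=[
            BssSettings(name="wlan1_guest", ssid="guest_network", security=Security())
        ],
    )
    # package_configs() returns the main interface section, then one section per
    # BSS, then any additional_parameters.
    for section in config.package_configs():
        print(section)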
diff --git a/src/antlion/controllers/ap_lib/hostapd_constants.py b/packages/antlion/controllers/ap_lib/hostapd_constants.py similarity index 95% rename from src/antlion/controllers/ap_lib/hostapd_constants.py rename to packages/antlion/controllers/ap_lib/hostapd_constants.py index ae7ef85..ea6fdb2 100755 --- a/src/antlion/controllers/ap_lib/hostapd_constants.py +++ b/packages/antlion/controllers/ap_lib/hostapd_constants.py
@@ -15,21 +15,40 @@ # limitations under the License. import itertools +from enum import Enum, StrEnum, auto, unique +from typing import TypedDict -from enum import Enum, auto, unique - +# TODO(http://b/286584981): Replace with BandType BAND_2G = "2g" BAND_5G = "5g" + + +@unique +class BandType(StrEnum): + BAND_2G = "2g" + BAND_5G = "5g" + + def default_channel(self) -> int: + match self: + case BandType.BAND_2G: + return 6 + case BandType.BAND_5G: + return 36 + + CHANNEL_BANDWIDTH_20MHZ = 20 CHANNEL_BANDWIDTH_40MHZ = 40 CHANNEL_BANDWIDTH_80MHZ = 80 CHANNEL_BANDWIDTH_160MHZ = 160 + +# TODO(http://b/286584981): Replace with SecurityModeInt WEP = 0 WPA1 = 1 WPA2 = 2 WPA3 = 2 # same as wpa2 and wpa2/wpa3, distinguished by wpa_key_mgmt -MIXED = 3 # applies to wpa/wpa2, and wpa/wpa2/wpa3, distinquished by wpa_key_mgmt +MIXED = 3 # applies to wpa/wpa2, and wpa/wpa2/wpa3, distinguished by wpa_key_mgmt ENT = 4 # get the correct constant + MAX_WPA_PSK_LENGTH = 64 MIN_WPA_PSK_LENGTH = 8 MAX_WPA_PASSWORD_LENGTH = 63 @@ -38,6 +57,8 @@ WPA2_DEFAULT_CIPER = "CCMP" WPA_GROUP_KEY_ROTATION_TIME = 600 WPA_STRICT_REKEY_DEFAULT = True + +# TODO(http://b/286584981): Replace these with SecurityMode enum WEP_STRING = "wep" WPA_STRING = "wpa" WPA2_STRING = "wpa2" @@ -46,10 +67,14 @@ WPA2_WPA3_MIXED_STRING = "wpa2/wpa3" WPA_WPA2_WPA3_MIXED_STRING = "wpa/wpa2/wpa3" ENT_STRING = "ent" + +# TODO(http://b/286584981): Replace with KeyManagement ENT_KEY_MGMT = "WPA-EAP" WPA_PSK_KEY_MGMT = "WPA-PSK" SAE_KEY_MGMT = "SAE" DUAL_WPA_PSK_SAE_KEY_MGMT = "WPA-PSK SAE" + +# TODO(http://b/286584981): Replace with SecurityMode.security_mode_int SECURITY_STRING_TO_SECURITY_MODE_INT = { WPA_STRING: WPA1, WPA2_STRING: WPA2, @@ -60,6 +85,8 @@ WEP_STRING: WEP, ENT_STRING: ENT, } + +# TODO(http://b/286584981): Replace with SecurityMode.key_management SECURITY_STRING_TO_WPA_KEY_MGMT = { WPA_STRING: WPA_PSK_KEY_MGMT, WPA2_STRING: WPA_PSK_KEY_MGMT, @@ -68,8 +95,8 @@ WPA2_WPA3_MIXED_STRING: DUAL_WPA_PSK_SAE_KEY_MGMT, WPA_WPA2_WPA3_MIXED_STRING: DUAL_WPA_PSK_SAE_KEY_MGMT, } -WPA3_MODE_STRINGS = {WPA3_STRING, WPA2_WPA3_MIXED_STRING, WPA_WPA2_WPA3_MIXED_STRING} +# TODO(http://b/286584981): Replace with SecurityMode.fuchsia_security_type SECURITY_STRING_TO_DEFAULT_TARGET_SECURITY = { WEP_STRING: WEP_STRING, WPA_STRING: WPA_STRING, @@ -91,8 +118,11 @@ WEP_HEX_LENGTH = [10, 26, 32, 58] WEP_STR_LENGTH = [5, 13, 16] WEP_DEFAULT_STR_LENGTH = 13 + +# TODO(http://b/286584981): Replace with BandType.default_channel() AP_DEFAULT_CHANNEL_2G = 6 AP_DEFAULT_CHANNEL_5G = 36 + AP_DEFAULT_MAX_SSIDS_2G = 8 AP_DEFAULT_MAX_SSIDS_5G = 8 AP_SSID_LENGTH_2G = 8 @@ -360,10 +390,16 @@ DRIVER_NAME = "nl80211" + +class VHTChannelWidth(TypedDict): + delta: int + channels: list[tuple[int, int]] + + CENTER_CHANNEL_MAP = { - VHT_CHANNEL_WIDTH_40: { - "delta": 2, - "channels": ( + VHT_CHANNEL_WIDTH_40: VHTChannelWidth( + delta=2, + channels=[ (36, 40), (44, 48), (52, 56), @@ -376,20 +412,23 @@ (140, 144), (149, 153), (157, 161), - ), - }, - VHT_CHANNEL_WIDTH_80: { - "delta": 6, - "channels": ( + ], + ), + VHT_CHANNEL_WIDTH_80: VHTChannelWidth( + delta=6, + channels=[ (36, 48), (52, 64), (100, 112), (116, 128), (132, 144), (149, 161), - ), - }, - VHT_CHANNEL_WIDTH_160: {"delta": 14, "channels": ((36, 64), (100, 128))}, + ], + ), + VHT_CHANNEL_WIDTH_160: VHTChannelWidth( + delta=14, + channels=[(36, 64), (100, 128)], + ), } OFDM_DATA_RATES = {"supported_rates": "60 90 120 180 240 360 480 540"} @@ -885,7 +924,7 @@ 165: {20}, } -ALL_CHANNELS = {**ALL_CHANNELS_2G, 
**ALL_CHANNELS_5G} +ALL_CHANNELS = ALL_CHANNELS_2G | ALL_CHANNELS_5G @unique
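Note: BandType is the intended replacement for the bare BAND_2G/BAND_5G strings, and default_channel() replaces AP_DEFAULT_CHANNEL_2G/5G. For example:

    from antlion.controllers.ap_lib.hostapd_constants import BandType

    band = BandType.BAND_5G
    assert str(band) == "5g"  # StrEnum keeps plain-string compatibility
    assert band.default_channel() == 36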
diff --git a/packages/antlion/controllers/ap_lib/hostapd_security.py b/packages/antlion/controllers/ap_lib/hostapd_security.py new file mode 100644 index 0000000..fe1d41c --- /dev/null +++ b/packages/antlion/controllers/ap_lib/hostapd_security.py
@@ -0,0 +1,453 @@ +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import string +from enum import Enum, StrEnum, auto, unique + +from honeydew.affordances.connectivity.wlan.utils.types import SecurityProtocol + +from antlion.controllers.ap_lib import hostapd_constants + + +class SecurityModeInt(int, Enum): + """Possible values for hostapd's "wpa" config option. + + The int value is a bit field that can enable WPA and/or WPA2. + + bit0 = enable WPA defined by IEEE 802.11i/D3.0 + bit1 = enable RNA (WPA2) defined by IEEE 802.11i/RSN + bit2 = enable WAPI (rejected/withdrawn) + bit3 = enable OSEN (ENT) + """ + + WEP = 0 + WPA1 = 1 + WPA2 = 2 + WPA3 = 2 # same as wpa2 and wpa2/wpa3; distinguished by wpa_key_mgmt + MIXED = 3 # applies to wpa/wpa2 and wpa/wpa2/wpa3; distinguished by wpa_key_mgmt + ENT = 8 + + def __str__(self): + return str(self.value) + + +@unique +class KeyManagement(StrEnum): + SAE = "SAE" + WPA_PSK = "WPA-PSK" + WPA_PSK_SAE = "WPA-PSK SAE" + ENT = "WPA-EAP" + + +# TODO(http://b/286584981): This is currently only being used for OpenWRT. +# Investigate whether we can replace KeyManagement with OpenWRTEncryptionMode. +@unique +class OpenWRTEncryptionMode(StrEnum): + """Combination of Wi-Fi encryption mode and ciphers. + + Only used by OpenWRT. + + Besides the encryption mode, the encryption option also specifies the group and peer + ciphers to use. To override the cipher, the value of encryption must be given in the + form "mode+cipher". This enum contains all possible combinations. + + See https://openwrt.org/docs/guide-user/network/wifi/basic#encryption_modes. 
+ """ + + NONE = "none" + """No authentication, no ciphers""" + SAE = "sae" + """WPA3 Personal (SAE) using CCMP cipher""" + SAE_MIXED = "sae-mixed" + """WPA2/WPA3 Personal (PSK/SAE) mixed mode using CCMP cipher""" + PSK2_TKIP_CCMP = "psk2+tkip+ccmp" + """WPA2 Personal (PSK) using TKIP and CCMP ciphers""" + PSK2_TKIP_AES = "psk2+tkip+aes" + """WPA2 Personal (PSK) using TKIP and AES ciphers""" + PSK2_TKIP = "psk2+tkip" + """WPA2 Personal (PSK) using TKIP cipher""" + PSK2_CCMP = "psk2+ccmp" + """WPA2 Personal (PSK) using CCMP cipher""" + PSK2_AES = "psk2+aes" + """WPA2 Personal (PSK) using AES cipher""" + PSK2 = "psk2" + """WPA2 Personal (PSK) using CCMP cipher""" + PSK_TKIP_CCMP = "psk+tkip+ccmp" + """WPA Personal (PSK) using TKIP and CCMP ciphers""" + PSK_TKIP_AES = "psk+tkip+aes" + """WPA Personal (PSK) using TKIP and AES ciphers""" + PSK_TKIP = "psk+tkip" + """WPA Personal (PSK) using TKIP cipher""" + PSK_CCMP = "psk+ccmp" + """WPA Personal (PSK) using CCMP cipher""" + PSK_AES = "psk+aes" + """WPA Personal (PSK) using AES cipher""" + PSK = "psk" + """WPA Personal (PSK) using CCMP cipher""" + PSK_MIXED_TKIP_CCMP = "psk-mixed+tkip+ccmp" + """WPA/WPA2 Personal (PSK) mixed mode using TKIP and CCMP ciphers""" + PSK_MIXED_TKIP_AES = "psk-mixed+tkip+aes" + """WPA/WPA2 Personal (PSK) mixed mode using TKIP and AES ciphers""" + PSK_MIXED_TKIP = "psk-mixed+tkip" + """WPA/WPA2 Personal (PSK) mixed mode using TKIP cipher""" + PSK_MIXED_CCMP = "psk-mixed+ccmp" + """WPA/WPA2 Personal (PSK) mixed mode using CCMP cipher""" + PSK_MIXED_AES = "psk-mixed+aes" + """WPA/WPA2 Personal (PSK) mixed mode using AES cipher""" + PSK_MIXED = "psk-mixed" + """WPA/WPA2 Personal (PSK) mixed mode using CCMP cipher""" + WEP = "wep" + """defaults to “open system” authentication aka wep+open using RC4 cipher""" + WEP_OPEN = "wep+open" + """“open system” authentication using RC4 cipher""" + WEP_SHARED = "wep+shared" + """“shared key” authentication using RC4 cipher""" + WPA3 = "wpa3" + """WPA3 Enterprise using CCMP cipher""" + WPA3_MIXED = "wpa3-mixed" + """WPA3/WPA2 Enterprise using CCMP cipher""" + WPA2_TKIP_CCMP = "wpa2+tkip+ccmp" + """WPA2 Enterprise using TKIP and CCMP ciphers""" + WPA2_TKIP_AES = "wpa2+tkip+aes" + """WPA2 Enterprise using TKIP and AES ciphers""" + WPA2_CCMP = "wpa2+ccmp" + """WPA2 Enterprise using CCMP cipher""" + WPA2_AES = "wpa2+aes'" + """WPA2 Enterprise using AES cipher""" + WPA2 = "wpa2" + """WPA2 Enterprise using CCMP cipher""" + WPA2_TKIP = "wpa2+tkip" + """WPA2 Enterprise using TKIP cipher""" + WPA_TKIP_CCMP = "wpa+tkip+ccmp" + """WPA Enterprise using TKIP and CCMP ciphers""" + WPA_TKIP_AES = "wpa+tkip+aes" + """WPA Enterprise using TKIP and AES ciphers""" + WPA_CCMP = "wpa+ccmp" + """WPA Enterprise using CCMP cipher""" + WPA_AES = "wpa+aes" + """WPA Enterprise using AES cipher""" + WPA_TKIP = "wpa+tkip" + """WPA Enterprise using TKIP cipher""" + WPA = "wpa" + """WPA Enterprise using CCMP cipher""" + WPA_MIXED_TKIP_CCMP = "wpa-mixed+tkip+ccmp" + """WPA/WPA2 Enterprise mixed mode using TKIP and CCMP ciphers""" + WPA_MIXED_TKIP_AES = "wpa-mixed+tkip+aes" + """WPA/WPA2 Enterprise mixed mode using TKIP and AES ciphers""" + WPA_MIXED_TKIP = "wpa-mixed+tkip" + """WPA/WPA2 Enterprise mixed mode using TKIP cipher""" + WPA_MIXED_CCMP = "wpa-mixed+ccmp" + """WPA/WPA2 Enterprise mixed mode using CCMP cipher""" + WPA_MIXED_AES = "wpa-mixed+aes" + """WPA/WPA2 Enterprise mixed mode using AES cipher""" + WPA_MIXED = "wpa-mixed" + """WPA/WPA2 Enterprise mixed mode using CCMP cipher""" + OWE = "owe" + 
"""Opportunistic Wireless Encryption (OWE) using CCMP cipher""" + + +@unique +class FuchsiaSecurityType(StrEnum): + """Fuchsia supported security types. + + Defined by the fuchsia.wlan.policy.SecurityType FIDL. + + https://cs.opensource.google/fuchsia/fuchsia/+/main:sdk/fidl/fuchsia.wlan.policy/types.fidl + """ + + NONE = "none" + WEP = "wep" + WPA = "wpa" + WPA2 = "wpa2" + WPA3 = "wpa3" + + +@unique +class SecurityMode(StrEnum): + OPEN = auto() + WEP = auto() + WPA = auto() + WPA2 = auto() + WPA_WPA2 = auto() + WPA3 = auto() + WPA2_WPA3 = auto() + WPA_WPA2_WPA3 = auto() + ENT = auto() + + def security_mode_int(self) -> SecurityModeInt: + match self: + case SecurityMode.OPEN: + raise TypeError("Open security doesn't have a SecurityModeInt") + case SecurityMode.WEP: + return SecurityModeInt.WEP + case SecurityMode.WPA: + return SecurityModeInt.WPA1 + case SecurityMode.WPA2: + return SecurityModeInt.WPA2 + case SecurityMode.WPA_WPA2: + return SecurityModeInt.MIXED + case SecurityMode.WPA3: + return SecurityModeInt.WPA3 + case SecurityMode.WPA2_WPA3: + return SecurityModeInt.WPA3 + case SecurityMode.WPA_WPA2_WPA3: + return SecurityModeInt.MIXED + case SecurityMode.ENT: + return SecurityModeInt.ENT + + def key_management(self) -> KeyManagement | None: + match self: + case SecurityMode.OPEN: + return None + case SecurityMode.WEP: + return None + case SecurityMode.WPA: + return KeyManagement.WPA_PSK + case SecurityMode.WPA2: + return KeyManagement.WPA_PSK + case SecurityMode.WPA_WPA2: + return KeyManagement.WPA_PSK + case SecurityMode.WPA3: + return KeyManagement.SAE + case SecurityMode.WPA2_WPA3: + return KeyManagement.WPA_PSK_SAE + case SecurityMode.WPA_WPA2_WPA3: + return KeyManagement.WPA_PSK_SAE + case SecurityMode.ENT: + return KeyManagement.ENT + + def fuchsia_security_type(self) -> FuchsiaSecurityType: + match self: + case SecurityMode.OPEN: + return FuchsiaSecurityType.NONE + case SecurityMode.WEP: + return FuchsiaSecurityType.WEP + case SecurityMode.WPA: + return FuchsiaSecurityType.WPA + case SecurityMode.WPA2: + return FuchsiaSecurityType.WPA2 + case SecurityMode.WPA_WPA2: + return FuchsiaSecurityType.WPA2 + case SecurityMode.WPA3: + return FuchsiaSecurityType.WPA3 + case SecurityMode.WPA2_WPA3: + return FuchsiaSecurityType.WPA3 + case SecurityMode.WPA_WPA2_WPA3: + return FuchsiaSecurityType.WPA3 + case SecurityMode.ENT: + raise NotImplementedError( + f'Fuchsia has not implemented support for security mode "{self}"' + ) + + def is_wpa3(self) -> bool: + match self: + case SecurityMode.OPEN: + return False + case SecurityMode.WEP: + return False + case SecurityMode.WPA: + return False + case SecurityMode.WPA2: + return False + case SecurityMode.WPA_WPA2: + return False + case SecurityMode.WPA3: + return True + case SecurityMode.WPA2_WPA3: + return True + case SecurityMode.WPA_WPA2_WPA3: + return True + case SecurityMode.ENT: + return False + raise TypeError("Unknown security mode") + + def protocol(self, enterprise: bool = False) -> SecurityProtocol: + match self: + case SecurityMode.OPEN: + return SecurityProtocol.OPEN + case SecurityMode.WEP: + return SecurityProtocol.WEP + case SecurityMode.WPA: + return SecurityProtocol.WPA1 + case SecurityMode.WPA2: + return ( + SecurityProtocol.WPA2_ENTERPRISE + if enterprise + else SecurityProtocol.WPA2_PERSONAL + ) + case SecurityMode.WPA_WPA2: + return ( + SecurityProtocol.WPA2_ENTERPRISE + if enterprise + else SecurityProtocol.WPA2_PERSONAL + ) + case SecurityMode.WPA3: + return ( + SecurityProtocol.WPA3_ENTERPRISE + if enterprise + else 
SecurityProtocol.WPA3_PERSONAL + ) + case SecurityMode.WPA2_WPA3: + return ( + SecurityProtocol.WPA3_ENTERPRISE + if enterprise + else SecurityProtocol.WPA3_PERSONAL + ) + case SecurityMode.WPA_WPA2_WPA3: + return ( + SecurityProtocol.WPA3_ENTERPRISE + if enterprise + else SecurityProtocol.WPA3_PERSONAL + ) + case SecurityMode.ENT: + raise NotImplementedError( + f'Fuchsia has not implemented support for security mode "{self}"' + ) + + +class Security(object): + """The Security class for hostapd representing some of the security + settings that are allowed in hostapd. If needed more can be added. + """ + + def __init__( + self, + security_mode: SecurityMode = SecurityMode.OPEN, + password: str | None = None, + wpa_cipher: str | None = hostapd_constants.WPA_DEFAULT_CIPHER, + wpa2_cipher: str | None = hostapd_constants.WPA2_DEFAULT_CIPER, + wpa_group_rekey: int = hostapd_constants.WPA_GROUP_KEY_ROTATION_TIME, + wpa_strict_rekey: bool = hostapd_constants.WPA_STRICT_REKEY_DEFAULT, + wep_default_key: int = hostapd_constants.WEP_DEFAULT_KEY, + radius_server_ip: str | None = None, + radius_server_port: int | None = None, + radius_server_secret: str | None = None, + ) -> None: + """Gather all of the security settings for WPA-PSK. This could be + expanded later. + + Args: + security_mode: Type of security mode. + password: The PSK or passphrase for the security mode. + wpa_cipher: The cipher to be used for wpa. + Options: TKIP, CCMP, TKIP CCMP + Default: TKIP + wpa2_cipher: The cipher to be used for wpa2. + Options: TKIP, CCMP, TKIP CCMP + Default: CCMP + wpa_group_rekey: How often to refresh the GTK regardless of network + changes. + Options: An integer in seconds, None + Default: 600 seconds + wpa_strict_rekey: Whether to do a group key update when client + leaves the network or not. + Options: True, False + Default: True + wep_default_key: The wep key number to use when transmitting. + radius_server_ip: Radius server IP for Enterprise auth. + radius_server_port: Radius server port for Enterprise auth. + radius_server_secret: Radius server secret for Enterprise auth. 
+ """ + self.security_mode = security_mode + self.wpa_cipher = wpa_cipher + self.wpa2_cipher = wpa2_cipher + self.wpa_group_rekey = wpa_group_rekey + self.wpa_strict_rekey = wpa_strict_rekey + self.wep_default_key = wep_default_key + self.radius_server_ip = radius_server_ip + self.radius_server_port = radius_server_port + self.radius_server_secret = radius_server_secret + if password: + if self.security_mode is SecurityMode.WEP: + if len(password) in hostapd_constants.WEP_STR_LENGTH: + self.password = f'"{password}"' + elif len(password) in hostapd_constants.WEP_HEX_LENGTH and all( + c in string.hexdigits for c in password + ): + self.password = password + else: + raise ValueError( + "WEP key must be a hex string of %s characters" + % hostapd_constants.WEP_HEX_LENGTH + ) + else: + if ( + len(password) < hostapd_constants.MIN_WPA_PSK_LENGTH + or len(password) > hostapd_constants.MAX_WPA_PSK_LENGTH + ): + raise ValueError( + "Password must be a minumum of %s characters and a maximum of %s" + % ( + hostapd_constants.MIN_WPA_PSK_LENGTH, + hostapd_constants.MAX_WPA_PSK_LENGTH, + ) + ) + else: + self.password = password + + def __str__(self) -> str: + return self.security_mode + + def generate_dict(self) -> dict[str, str | int]: + """Returns: an ordered dictionary of settings""" + if self.security_mode is SecurityMode.OPEN: + return {} + + settings: dict[str, str | int] = collections.OrderedDict() + + if self.security_mode is SecurityMode.WEP: + settings["wep_default_key"] = self.wep_default_key + settings[f"wep_key{self.wep_default_key}"] = self.password + elif self.security_mode == SecurityMode.ENT: + if self.radius_server_ip is not None: + settings["auth_server_addr"] = self.radius_server_ip + if self.radius_server_port is not None: + settings["auth_server_port"] = self.radius_server_port + if self.radius_server_secret is not None: + settings["auth_server_shared_secret"] = self.radius_server_secret + settings["wpa_key_mgmt"] = hostapd_constants.ENT_KEY_MGMT + settings["ieee8021x"] = hostapd_constants.IEEE8021X + settings["wpa"] = hostapd_constants.WPA2 + else: + settings["wpa"] = self.security_mode.security_mode_int().value + if len(self.password) == hostapd_constants.MAX_WPA_PSK_LENGTH: + settings["wpa_psk"] = self.password + else: + settings["wpa_passphrase"] = self.password + # For wpa, wpa/wpa2, and wpa/wpa2/wpa3, add wpa_pairwise + if self.wpa_cipher and ( + self.security_mode is SecurityMode.WPA + or self.security_mode is SecurityMode.WPA_WPA2 + or self.security_mode is SecurityMode.WPA_WPA2_WPA3 + ): + settings["wpa_pairwise"] = self.wpa_cipher + # For wpa/wpa2, wpa2, wpa3, and wpa2/wpa3, and wpa/wpa2, wpa3, add rsn_pairwise + if self.wpa2_cipher and ( + self.security_mode is SecurityMode.WPA_WPA2 + or self.security_mode is SecurityMode.WPA2 + or self.security_mode is SecurityMode.WPA2_WPA3 + or self.security_mode is SecurityMode.WPA3 + ): + settings["rsn_pairwise"] = self.wpa2_cipher + # Add wpa_key_mgmt based on security mode string + wpa_key_mgmt = self.security_mode.key_management() + if wpa_key_mgmt is not None: + settings["wpa_key_mgmt"] = str(wpa_key_mgmt) + if self.wpa_group_rekey: + settings["wpa_group_rekey"] = self.wpa_group_rekey + if self.wpa_strict_rekey: + settings["wpa_strict_rekey"] = hostapd_constants.WPA_STRICT_REKEY + + return settings
diff --git a/packages/antlion/controllers/ap_lib/hostapd_utils.py b/packages/antlion/controllers/ap_lib/hostapd_utils.py new file mode 100644 index 0000000..060777e --- /dev/null +++ b/packages/antlion/controllers/ap_lib/hostapd_utils.py
@@ -0,0 +1,97 @@ +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from antlion import utils +from antlion.controllers.ap_lib import hostapd_constants +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode + + +def generate_random_password( + security_mode: SecurityMode = SecurityMode.OPEN, + length: int | None = None, + hex: int | None = None, +) -> str: + """Generates a random password. Defaults to an 8 character ASCII password. + + Args: + security_mode: Used to determine if length should be WEP compatible + (useful for generated tests to simply pass in security mode) + length: Length of password to generate. Defaults to 8, unless + security_mode is WEP, then 13 + hex: If True, generates a hex string, else ascii + """ + if hex: + generator_func = utils.rand_hex_str + else: + generator_func = utils.rand_ascii_str + + if length: + return generator_func(length) + if security_mode is SecurityMode.WEP: + return generator_func(hostapd_constants.WEP_DEFAULT_STR_LENGTH) + else: + return generator_func(hostapd_constants.MIN_WPA_PSK_LENGTH) + + +def verify_interface(interface: str, valid_interfaces: list[str]) -> None: + """Raises error if interface is missing or invalid + + Args: + interface: interface name + valid_interfaces: valid interface names + """ + if interface not in valid_interfaces: + raise ValueError(f"Invalid interface name was passed: {interface}") + + +def verify_security_mode( + security_profile: Security, valid_security_modes: list[SecurityMode] +) -> None: + """Raises error if security mode is not in list of valid security modes. + + Args: + security_profile: Security to verify + valid_security_modes: Valid security modes for a profile. + """ + if security_profile.security_mode not in valid_security_modes: + raise ValueError( + f"Invalid Security Mode: {security_profile.security_mode}; " + f"Valid Security Modes for this profile: {valid_security_modes}" + ) + + +def verify_cipher(security_profile: Security, valid_ciphers: list[str]) -> None: + """Raise error if cipher is not in list of valid ciphers. + + Args: + security_profile: Security profile to verify + valid_ciphers: A list of valid ciphers for security_profile. + """ + if security_profile.security_mode is SecurityMode.OPEN: + raise ValueError("Security mode is open.") + elif security_profile.security_mode is SecurityMode.WPA: + if security_profile.wpa_cipher not in valid_ciphers: + raise ValueError( + f"Invalid WPA Cipher: {security_profile.wpa_cipher}. " + f"Valid WPA Ciphers for this profile: {valid_ciphers}" + ) + elif security_profile.security_mode is SecurityMode.WPA2: + if security_profile.wpa2_cipher not in valid_ciphers: + raise ValueError( + f"Invalid WPA2 Cipher: {security_profile.wpa2_cipher}. " + f"Valid WPA2 Ciphers for this profile: {valid_ciphers}" + ) + else: + raise ValueError(f"Invalid Security Mode: {security_profile.security_mode}")
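These helpers are exercised by every AP profile later in this change; a short sketch of the intended flow (the allowed-mode list is illustrative):

    from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode
    from antlion.controllers.ap_lib.hostapd_utils import (
        generate_random_password,
        verify_security_mode,
    )

    # Defaults to an 8-character ASCII PSK; pass security_mode=SecurityMode.WEP
    # for a 13-character WEP-compatible key, or hex=True for a hex string.
    psk = generate_random_password()

    security = Security(security_mode=SecurityMode.WPA2, password=psk)

    # Raises ValueError when the profile's mode is outside the allowed list.
    verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2])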
diff --git a/src/antlion/controllers/ap_lib/radio_measurement.py b/packages/antlion/controllers/ap_lib/radio_measurement.py similarity index 100% rename from src/antlion/controllers/ap_lib/radio_measurement.py rename to packages/antlion/controllers/ap_lib/radio_measurement.py
diff --git a/src/antlion/controllers/ap_lib/radvd.py b/packages/antlion/controllers/ap_lib/radvd.py similarity index 87% rename from src/antlion/controllers/ap_lib/radvd.py rename to packages/antlion/controllers/ap_lib/radvd.py index 216ad0e..cb099d2 100644 --- a/src/antlion/controllers/ap_lib/radvd.py +++ b/packages/antlion/controllers/ap_lib/radvd.py
@@ -17,11 +17,11 @@ import tempfile import time -from typing import Any, Optional - from antlion.controllers.ap_lib.radvd_config import RadvdConfig from antlion.controllers.utils_lib.commands import shell from antlion.libs.proc import job +from antlion.logger import LogLevel +from antlion.runner import Runner class Error(Exception): @@ -41,10 +41,10 @@ def __init__( self, - runner: Any, + runner: Runner, interface: str, - working_dir: Optional[str] = None, - radvd_binary: Optional[str] = None, + working_dir: str | None = None, + radvd_binary: str | None = None, ) -> None: """ Args: @@ -61,7 +61,7 @@ radvd_binary = "radvd" else: logging.debug(f"Using radvd binary located at {radvd_binary}") - if working_dir is None and runner == job.run: + if working_dir is None and runner.run == job.run: working_dir = tempfile.gettempdir() else: working_dir = "/tmp" @@ -69,8 +69,8 @@ self._runner = runner self._interface = interface self._working_dir = working_dir - self.config: Optional[RadvdConfig] = None - self._shell = shell.ShellCommand(runner, working_dir) + self.config: RadvdConfig | None = None + self._shell = shell.ShellCommand(runner) self._log_file = f"{working_dir}/radvd-{self._interface}.log" self._config_file = f"{working_dir}/radvd-{self._interface}.conf" self._pid_file = f"{working_dir}/radvd-{self._interface}.pid" @@ -132,7 +132,8 @@ A string of the radvd logs. """ # TODO: Auto pulling of logs when stop is called. - return self._shell.read_file(self._log_file) + with LogLevel(self._runner.log, logging.INFO): + return self._shell.read_file(self._log_file) def _wait_for_process(self, timeout: int = 60) -> None: """Waits for the process to come up. @@ -179,37 +180,35 @@ conf = config.package_configs() lines = ["interface %s {" % self._interface] for interface_option_key, interface_option in conf["interface_options"].items(): - lines.append( - "\t%s %s;" % (str(interface_option_key), str(interface_option)) - ) - lines.append("\tprefix %s" % conf["prefix"]) + lines.append(f"\t{str(interface_option_key)} {str(interface_option)};") + lines.append(f"\tprefix {conf['prefix']}") lines.append("\t{") for prefix_option in conf["prefix_options"].items(): - lines.append("\t\t%s;" % " ".join(map(str, prefix_option))) + lines.append(f"\t\t{' '.join(map(str, prefix_option))};") lines.append("\t};") if conf["clients"]: lines.append("\tclients") lines.append("\t{") for client in conf["clients"]: - lines.append("\t\t%s;" % client) + lines.append(f"\t\t{client};") lines.append("\t};") if conf["route"]: lines.append("\troute %s {" % conf["route"]) for route_option in conf["route_options"].items(): - lines.append("\t\t%s;" % " ".join(map(str, route_option))) + lines.append(f"\t\t{' '.join(map(str, route_option))};") lines.append("\t};") if conf["rdnss"]: lines.append( "\tRDNSS %s {" % " ".join([str(elem) for elem in conf["rdnss"]]) ) for rdnss_option in conf["rdnss_options"].items(): - lines.append("\t\t%s;" % " ".join(map(str, rdnss_option))) + lines.append(f"\t\t{' '.join(map(str, rdnss_option))};") lines.append("\t};") lines.append("};") output_config = "\n".join(lines) - logging.info("Writing %s" % self._config_file) + logging.info(f"Writing {self._config_file}") logging.debug("******************Start*******************") - logging.debug("\n%s" % output_config) + logging.debug(f"\n{output_config}") logging.debug("*******************End********************") self._shell.write_file(self._config_file, output_config)
diff --git a/src/antlion/controllers/ap_lib/radvd_config.py b/packages/antlion/controllers/ap_lib/radvd_config.py similarity index 90% rename from src/antlion/controllers/ap_lib/radvd_config.py rename to packages/antlion/controllers/ap_lib/radvd_config.py index 647df82..d3d6d97 100644 --- a/src/antlion/controllers/ap_lib/radvd_config.py +++ b/packages/antlion/controllers/ap_lib/radvd_config.py
@@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, List, Optional +import collections +from typing import Any from antlion.controllers.ap_lib import radvd_constants -import collections - class RadvdConfig(object): """The root settings for the router advertisement daemon. @@ -28,41 +27,41 @@ def __init__( self, prefix: str = radvd_constants.DEFAULT_PREFIX, - clients: List[str] = [], - route: Optional[Any] = None, - rdnss: List[str] = [], - ignore_if_missing: Optional[str] = None, + clients: list[str] = [], + route: Any | None = None, + rdnss: list[str] = [], + ignore_if_missing: str | None = None, adv_send_advert: str = radvd_constants.ADV_SEND_ADVERT_ON, - unicast_only: Optional[str] = None, - max_rtr_adv_interval: Optional[int] = None, - min_rtr_adv_interval: Optional[int] = None, - min_delay_between_ras: Optional[int] = None, - adv_managed_flag: Optional[str] = None, - adv_other_config_flag: Optional[str] = None, - adv_link_mtu: Optional[int] = None, - adv_reachable_time: Optional[int] = None, - adv_retrans_timer: Optional[int] = None, - adv_cur_hop_limit: Optional[int] = None, - adv_default_lifetime: Optional[int] = None, - adv_default_preference: Optional[str] = None, - adv_source_ll_address: Optional[str] = None, - adv_home_agent_flag: Optional[str] = None, - adv_home_agent_info: Optional[str] = None, - home_agent_lifetime: Optional[int] = None, - home_agent_preference: Optional[int] = None, - adv_mob_rtr_support_flag: Optional[str] = None, - adv_interval_opt: Optional[str] = None, + unicast_only: str | None = None, + max_rtr_adv_interval: int | None = None, + min_rtr_adv_interval: int | None = None, + min_delay_between_ras: int | None = None, + adv_managed_flag: str | None = None, + adv_other_config_flag: str | None = None, + adv_link_mtu: int | None = None, + adv_reachable_time: int | None = None, + adv_retrans_timer: int | None = None, + adv_cur_hop_limit: int | None = None, + adv_default_lifetime: int | None = None, + adv_default_preference: str | None = None, + adv_source_ll_address: str | None = None, + adv_home_agent_flag: str | None = None, + adv_home_agent_info: str | None = None, + home_agent_lifetime: int | None = None, + home_agent_preference: int | None = None, + adv_mob_rtr_support_flag: str | None = None, + adv_interval_opt: str | None = None, adv_on_link: str = radvd_constants.ADV_ON_LINK_ON, adv_autonomous: str = radvd_constants.ADV_AUTONOMOUS_ON, - adv_router_addr: Optional[str] = None, - adv_valid_lifetime: Optional[int] = None, - adv_preferred_lifetime: Optional[int] = None, - base_6to4_interface: Optional[str] = None, - adv_route_lifetime: Optional[int] = None, - adv_route_preference: Optional[str] = None, - adv_rdnss_preference: Optional[int] = None, - adv_rdnss_open: Optional[str] = None, - adv_rdnss_lifetime: Optional[int] = None, + adv_router_addr: str | None = None, + adv_valid_lifetime: int | None = None, + adv_preferred_lifetime: int | None = None, + base_6to4_interface: str | None = None, + adv_route_lifetime: int | None = None, + adv_route_preference: str | None = None, + adv_rdnss_preference: int | None = None, + adv_rdnss_open: str | None = None, + adv_rdnss_lifetime: int | None = None, ) -> None: """Construct a RadvdConfig. @@ -241,7 +240,7 @@ self._adv_rdnss_lifetime = adv_rdnss_lifetime def package_configs(self): - conf = dict() + conf: dict[str, Any] = dict() conf["prefix"] = self._prefix conf["clients"] = self._clients conf["route"] = self._route
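The typing cleanup above does not change behavior; for reference, how RadvdConfig is consumed (the keys shown are the ones Radvd._write_configs reads):

    from antlion.controllers.ap_lib.radvd_config import RadvdConfig

    # All-defaults config: advertises radvd_constants.DEFAULT_PREFIX with
    # AdvSendAdvert, AdvOnLink, and AdvAutonomous enabled.
    config = RadvdConfig()

    # Flattened settings dict rendered into radvd-<interface>.conf by
    # Radvd._write_configs(); optional settings left at None are skipped
    # when the file is written.
    conf = config.package_configs()
    print(conf["prefix"], conf["clients"], conf["route"])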
diff --git a/src/antlion/controllers/ap_lib/radvd_constants.py b/packages/antlion/controllers/ap_lib/radvd_constants.py similarity index 100% rename from src/antlion/controllers/ap_lib/radvd_constants.py rename to packages/antlion/controllers/ap_lib/radvd_constants.py
diff --git a/packages/antlion/controllers/ap_lib/regulatory_channels.py b/packages/antlion/controllers/ap_lib/regulatory_channels.py new file mode 100644 index 0000000..432607c --- /dev/null +++ b/packages/antlion/controllers/ap_lib/regulatory_channels.py
@@ -0,0 +1,710 @@ +from dataclasses import dataclass + +Channel = int +Bandwidth = int +# TODO(http://b/281728764): Add device requirements to each frequency e.g. +# "MUST be used indoors only" or "MUST be used with DFS". +ChannelBandwidthMap = dict[Channel, list[Bandwidth]] + + +@dataclass +class CountryChannels: + country_code: str + allowed_channels: ChannelBandwidthMap + + +# All antlion-supported channels and frequencies for use in regulatory testing. +TEST_CHANNELS: ChannelBandwidthMap = { + 1: [20], + 2: [20], + 3: [20], + 4: [20], + 5: [20], + 6: [20], + 7: [20], + 8: [20], + 9: [20], + 10: [20], + 11: [20], + 12: [20], + 13: [20], + 14: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], +} + +# All universally accepted 2.4GHz channels and frequencies. +WORLD_WIDE_2G_CHANNELS: ChannelBandwidthMap = { + 1: [20], + 2: [20], + 3: [20], + 4: [20], + 5: [20], + 6: [20], + 7: [20], + 8: [20], + 9: [20], + 10: [20], + 11: [20], +} + +# List of supported channels and frequencies by country. +# +# Please keep this alphabetically ordered. Thanks! +# +# TODO: Add missing countries: Russia, Israel, Korea, Turkey, South Africa, +# Brazil, Bahrain, Vietnam +COUNTRY_CHANNELS = { + "Australia": CountryChannels( + country_code="AU", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], + }, + ), + "Austria": CountryChannels( + country_code="AT", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Belgium": CountryChannels( + country_code="BE", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Canada": CountryChannels( + country_code="CA", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 
40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], + }, + ), + "China": CountryChannels( + country_code="CH", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Denmark": CountryChannels( + country_code="DK", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "France": CountryChannels( + country_code="FR", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Germany": CountryChannels( + country_code="DE", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "India": CountryChannels( + country_code="IN", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], + }, + ), + "Ireland": CountryChannels( + country_code="IE", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Italy": 
CountryChannels( + country_code="IT", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Japan": CountryChannels( + country_code="JP", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + }, + ), + "Mexico": CountryChannels( + country_code="MX", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], + }, + ), + "Netherlands": CountryChannels( + country_code="NL", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "New Zealand": CountryChannels( + country_code="NZ", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], + }, + ), + "Norway": CountryChannels( + country_code="NO", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Singapore": CountryChannels( + country_code="SG", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 
60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], + }, + ), + "Spain": CountryChannels( + country_code="ES", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Sweden": CountryChannels( + country_code="SE", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "Taiwan": CountryChannels( + country_code="TW", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], + }, + ), + "United Kingdom of Great Britain": CountryChannels( + country_code="GB", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 11: [20], + 12: [20], + 13: [20], + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + }, + ), + "United States of America": CountryChannels( + country_code="US", + allowed_channels=WORLD_WIDE_2G_CHANNELS + | { + 36: [20, 40, 80], + 40: [20, 40, 80], + 44: [20, 40, 80], + 48: [20, 40, 80], + 52: [20, 40, 80], + 56: [20, 40, 80], + 60: [20, 40, 80], + 64: [20, 40, 80], + 100: [20, 40, 80], + 104: [20, 40, 80], + 108: [20, 40, 80], + 112: [20, 40, 80], + 116: [20, 40, 80], + 120: [20, 40, 80], + 124: [20, 40, 80], + 128: [20, 40, 80], + 132: [20, 40, 80], + 136: [20, 40, 80], + 140: [20, 40, 80], + 144: [20, 40, 80], + 149: [20, 40, 80], + 153: [20, 40, 80], + 157: [20, 40, 80], + 161: [20, 40, 80], + 165: [20], + }, + ), +}
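A sketch of how the new table is meant to be queried from regulatory tests:

    from antlion.controllers.ap_lib.regulatory_channels import (
        COUNTRY_CHANNELS,
        TEST_CHANNELS,
    )

    us = COUNTRY_CHANNELS["United States of America"]
    assert us.country_code == "US"

    # Is an 80 MHz bandwidth allowed on channel 149 in this domain?
    allowed = 80 in us.allowed_channels.get(149, [])

    # Test channels the domain does not permit at all (channels 12-14 for US).
    disallowed = sorted(set(TEST_CHANNELS) - set(us.allowed_channels))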
diff --git a/src/antlion/controllers/ap_lib/third_party_ap_profiles/__init__.py b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/__init__.py similarity index 100% rename from src/antlion/controllers/ap_lib/third_party_ap_profiles/__init__.py rename to packages/antlion/controllers/ap_lib/third_party_ap_profiles/__init__.py
diff --git a/src/antlion/controllers/ap_lib/third_party_ap_profiles/actiontec.py b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/actiontec.py similarity index 76% rename from src/antlion/controllers/ap_lib/third_party_ap_profiles/actiontec.py rename to packages/antlion/controllers/ap_lib/third_party_ap_profiles/actiontec.py index 9e48935..f04f60b 100644 --- a/src/antlion/controllers/ap_lib/third_party_ap_profiles/actiontec.py +++ b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/actiontec.py
@@ -12,19 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import utils -from antlion.controllers.ap_lib import hostapd_config -from antlion.controllers.ap_lib import hostapd_constants -from antlion.controllers.ap_lib import hostapd_utils +from antlion.controllers.ap_lib import hostapd_config, hostapd_constants, hostapd_utils +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode -def actiontec_pk5000(iface_wlan_2g=None, channel=None, security=None, ssid=None): +def actiontec_pk5000( + iface_wlan_2g: str, channel: int, security: Security, ssid: str | None = None +) -> hostapd_config.HostapdConfig: """A simulated implementation of what a Actiontec PK5000 AP Args: iface_wlan_2g: The 2.4 interface of the test AP. channel: What channel to use. Only 2.4Ghz is supported for this profile - security: A security profile. Must be none or WPA2 as this is what is + security: A security profile. Must be open or WPA2 as this is what is supported by the PK5000. ssid: Network name Returns: @@ -41,13 +41,12 @@ # Technically this should be 14 but since the PK5000 is a US only AP, # 11 is the highest allowable channel. raise ValueError( - "The Actiontec PK5000 does not support 5Ghz. " - "Invalid channel (%s)" % channel + f"The Actiontec PK5000 does not support 5Ghz. Invalid channel ({channel})" ) # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) interface = iface_wlan_2g @@ -56,9 +55,9 @@ beacon_interval = 100 dtim_period = 3 # Sets the basic rates and supported rates of the PK5000 - additional_params = utils.merge_dicts( - hostapd_constants.CCK_AND_OFDM_BASIC_RATES, - hostapd_constants.CCK_AND_OFDM_DATA_RATES, + additional_params = ( + hostapd_constants.CCK_AND_OFDM_BASIC_RATES + | hostapd_constants.CCK_AND_OFDM_DATA_RATES ) config = hostapd_config.HostapdConfig( @@ -78,7 +77,9 @@ return config -def actiontec_mi424wr(iface_wlan_2g=None, channel=None, security=None, ssid=None): +def actiontec_mi424wr( + iface_wlan_2g: str, channel: int, security: Security, ssid: str | None = None +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported """A simulated implementation of an Actiontec MI424WR AP. Args: @@ -103,13 +104,12 @@ """ if channel > 11: raise ValueError( - "The Actiontec MI424WR does not support 5Ghz. " - "Invalid channel (%s)" % channel + f"The Actiontec MI424WR does not support 5Ghz. 
Invalid channel ({channel})" ) # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) n_capabilities = [ @@ -117,9 +117,9 @@ hostapd_constants.N_CAPABILITY_DSSS_CCK_40, hostapd_constants.N_CAPABILITY_RX_STBC1, ] - rates = utils.merge_dicts( - hostapd_constants.CCK_AND_OFDM_DATA_RATES, - hostapd_constants.CCK_AND_OFDM_BASIC_RATES, + rates = ( + hostapd_constants.CCK_AND_OFDM_DATA_RATES + | hostapd_constants.CCK_AND_OFDM_BASIC_RATES ) # Proprietary Atheros Communication: Adv Capability IE # Proprietary Atheros Communication: Unknown IE @@ -130,7 +130,7 @@ "0706555320010b1b" } - additional_params = utils.merge_dicts(rates, vendor_elements) + additional_params = rates | vendor_elements config = hostapd_config.HostapdConfig( ssid=ssid,
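With security now a required Security instance (the old signatures accepted None for open networks), call sites for these profiles take the shape below; the interface and SSID names are illustrative, and the interface must appear in hostapd_constants.INTERFACE_2G_LIST:

    from antlion.controllers.ap_lib.hostapd_security import Security
    from antlion.controllers.ap_lib.third_party_ap_profiles import actiontec

    # Open network on channel 6; Security() defaults to SecurityMode.OPEN.
    config = actiontec.actiontec_pk5000(
        iface_wlan_2g="wlan0",  # assumed to be in INTERFACE_2G_LIST
        channel=6,
        security=Security(),
        ssid="test_network",
    )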
diff --git a/src/antlion/controllers/ap_lib/third_party_ap_profiles/asus.py b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/asus.py similarity index 87% rename from src/antlion/controllers/ap_lib/third_party_ap_profiles/asus.py rename to packages/antlion/controllers/ap_lib/third_party_ap_profiles/asus.py index ea25157..6a9ae27 100644 --- a/src/antlion/controllers/ap_lib/third_party_ap_profiles/asus.py +++ b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/asus.py
@@ -12,16 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import utils -from antlion.controllers.ap_lib import hostapd_config -from antlion.controllers.ap_lib import hostapd_constants -from antlion.controllers.ap_lib import hostapd_utils +from antlion.controllers.ap_lib import hostapd_config, hostapd_constants, hostapd_utils +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode def asus_rtac66u( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported # TODO(b/144446076): Address non-whirlwind hardware capabilities. """A simulated implementation of an Asus RTAC66U AP. @@ -29,7 +31,7 @@ iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5Ghz interface of the test AP. channel: What channel to use. - security: A security profile. Must be none or WPA2 as this is what is + security: A security profile. Must be open or WPA2 as this is what is supported by the RTAC66U. ssid: Network name Returns: @@ -75,8 +77,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -119,9 +121,7 @@ hostapd_constants.AC_CAPABILITY_MAX_A_MPDU_LEN_EXP7, ] - additional_params = utils.merge_dicts( - rates, vendor_elements, hostapd_constants.UAPSD_ENABLED - ) + additional_params = rates | vendor_elements | hostapd_constants.UAPSD_ENABLED config = hostapd_config.HostapdConfig( ssid=ssid, @@ -144,14 +144,18 @@ def asus_rtac86u( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: """A simulated implementation of an Asus RTAC86U AP. Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5Ghz interface of the test AP. channel: What channel to use. - security: A security profile. Must be none or WPA2 as this is what is + security: A security profile. Must be open or WPA2 as this is what is supported by the RTAC86U. 
ssid: Network name Returns: @@ -180,8 +184,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -202,7 +206,7 @@ interface = iface_wlan_5g mode = hostapd_constants.MODE_11A rates.update(hostapd_constants.OFDM_ONLY_BASIC_RATES) - spectrum_mgmt = (True,) + spectrum_mgmt = True # Country Information IE (w/ individual channel info) # TPC Report Transmit Power IE # Measurement Pilot Transmission IE @@ -214,7 +218,7 @@ "42020000" } - additional_params = utils.merge_dicts(rates, qbss, vendor_elements) + additional_params = rates | qbss | vendor_elements config = hostapd_config.HostapdConfig( ssid=ssid, @@ -234,8 +238,12 @@ def asus_rtac5300( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported # TODO(b/144446076): Address non-whirlwind hardware capabilities. """A simulated implementation of an Asus RTAC5300 AP. @@ -243,7 +251,7 @@ iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5Ghz interface of the test AP. channel: What channel to use. - security: A security profile. Must be none or WPA2 as this is what is + security: A security profile. Must be open or WPA2 as this is what is supported by the RTAC5300. ssid: Network name Returns: @@ -286,8 +294,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -334,9 +342,7 @@ hostapd_constants.AC_CAPABILITY_MAX_A_MPDU_LEN_EXP7, ] - additional_params = utils.merge_dicts( - rates, qbss, vendor_elements, hostapd_constants.UAPSD_ENABLED - ) + additional_params = rates | qbss | vendor_elements | hostapd_constants.UAPSD_ENABLED config = hostapd_config.HostapdConfig( ssid=ssid, @@ -358,14 +364,18 @@ def asus_rtn56u( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: """A simulated implementation of an Asus RTN56U AP. Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5Ghz interface of the test AP. channel: What channel to use. - security: A security profile. Must be none or WPA2 as this is what is + security: A security profile. Must be open or WPA2 as this is what is supported by the RTN56U. 
ssid: Network name Returns: @@ -396,8 +406,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -435,9 +445,7 @@ # US Country Code IE vendor_elements = {"vendor_elements": "dd07000c4307000000" "0706555320010b14"} - additional_params = utils.merge_dicts( - rates, vendor_elements, qbss, hostapd_constants.UAPSD_ENABLED - ) + additional_params = rates | vendor_elements | qbss | hostapd_constants.UAPSD_ENABLED config = hostapd_config.HostapdConfig( ssid=ssid, @@ -458,15 +466,19 @@ def asus_rtn66u( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported """A simulated implementation of an Asus RTN66U AP. Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5Ghz interface of the test AP. channel: What channel to use. - security: A security profile. Must be none or WPA2 as this is what is + security: A security profile. Must be open or WPA2 as this is what is supported by the RTN66U. ssid: Network name Returns: @@ -495,8 +507,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -522,9 +534,7 @@ interface = iface_wlan_5g rates.update(hostapd_constants.OFDM_ONLY_BASIC_RATES) - additional_params = utils.merge_dicts( - rates, vendor_elements, hostapd_constants.UAPSD_ENABLED - ) + additional_params = rates | vendor_elements | hostapd_constants.UAPSD_ENABLED config = hostapd_config.HostapdConfig( ssid=ssid,
diff --git a/src/antlion/controllers/ap_lib/third_party_ap_profiles/belkin.py b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/belkin.py similarity index 77% rename from src/antlion/controllers/ap_lib/third_party_ap_profiles/belkin.py rename to packages/antlion/controllers/ap_lib/third_party_ap_profiles/belkin.py index 9c5c99d..62a9d66 100644 --- a/src/antlion/controllers/ap_lib/third_party_ap_profiles/belkin.py +++ b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/belkin.py
@@ -12,20 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import utils -from antlion.controllers.ap_lib import hostapd_config -from antlion.controllers.ap_lib import hostapd_constants -from antlion.controllers.ap_lib import hostapd_utils +from antlion.controllers.ap_lib import hostapd_config, hostapd_constants, hostapd_utils +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode -def belkin_f9k1001v5(iface_wlan_2g=None, channel=None, security=None, ssid=None): +def belkin_f9k1001v5( + iface_wlan_2g: str, channel: int, security: Security, ssid: str | None = None +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported """A simulated implementation of what a Belkin F9K1001v5 AP Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. @@ -50,13 +50,12 @@ """ if channel > 11: raise ValueError( - "The Belkin F9k1001v5 does not support 5Ghz. " - "Invalid channel (%s)" % channel + f"The Belkin F9k1001v5 does not support 5Ghz. Invalid channel ({channel})" ) # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) n_capabilities = [ @@ -67,9 +66,9 @@ hostapd_constants.N_CAPABILITY_DSSS_CCK_40, ] - rates = additional_params = utils.merge_dicts( - hostapd_constants.CCK_AND_OFDM_BASIC_RATES, - hostapd_constants.CCK_AND_OFDM_DATA_RATES, + rates = ( + hostapd_constants.CCK_AND_OFDM_BASIC_RATES + | hostapd_constants.CCK_AND_OFDM_DATA_RATES ) # Broadcom IE @@ -79,7 +78,7 @@ "dd180050f204104a00011010440001021049000600372a000120" } - additional_params = utils.merge_dicts(rates, vendor_elements) + additional_params = rates | vendor_elements config = hostapd_config.HostapdConfig( ssid=ssid,
diff --git a/src/antlion/controllers/ap_lib/third_party_ap_profiles/linksys.py b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/linksys.py similarity index 86% rename from src/antlion/controllers/ap_lib/third_party_ap_profiles/linksys.py rename to packages/antlion/controllers/ap_lib/third_party_ap_profiles/linksys.py index 8010837..21f3fb1 100644 --- a/src/antlion/controllers/ap_lib/third_party_ap_profiles/linksys.py +++ b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/linksys.py
@@ -12,16 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import utils -from antlion.controllers.ap_lib import hostapd_config -from antlion.controllers.ap_lib import hostapd_constants -from antlion.controllers.ap_lib import hostapd_utils +from antlion.controllers.ap_lib import hostapd_config, hostapd_constants, hostapd_utils +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode def linksys_ea4500( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported # TODO(b/144446076): Address non-whirlwind hardware capabilities. """A simulated implementation of what a Linksys EA4500 AP @@ -29,7 +31,7 @@ iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5GHz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. @@ -53,8 +55,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -90,9 +92,7 @@ rates.update(hostapd_constants.OFDM_ONLY_BASIC_RATES) obss_interval = None - additional_params = utils.merge_dicts( - rates, vendor_elements, hostapd_constants.UAPSD_ENABLED - ) + additional_params = rates | vendor_elements | hostapd_constants.UAPSD_ENABLED config = hostapd_config.HostapdConfig( ssid=ssid, @@ -114,14 +114,18 @@ def linksys_ea9500( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: """A simulated implementation of what a Linksys EA9500 AP Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5GHz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. 
@@ -143,8 +147,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -165,7 +169,7 @@ mode = hostapd_constants.MODE_11A rates.update(hostapd_constants.OFDM_ONLY_BASIC_RATES) - additional_params = utils.merge_dicts(rates, qbss, vendor_elements) + additional_params = rates | qbss | vendor_elements config = hostapd_config.HostapdConfig( ssid=ssid, @@ -184,15 +188,19 @@ def linksys_wrt1900acv2( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/144446076): Address non-whirlwind hardware capabilities. """A simulated implementation of what a Linksys WRT1900ACV2 AP Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5GHz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. @@ -223,8 +231,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -265,15 +273,15 @@ interface = iface_wlan_5g rates.update(hostapd_constants.OFDM_ONLY_BASIC_RATES) obss_interval = None - spectrum_mgmt = (True,) + spectrum_mgmt = True local_pwr_constraint = {"local_pwr_constraint": 3} # Country Information IE (w/ individual channel info) vendor_elements["vendor_elements"] += ( "071e5553202401112801112c011130" "01119501179901179d0117a10117a50117" ) - additional_params = utils.merge_dicts( - rates, vendor_elements, hostapd_constants.UAPSD_ENABLED, local_pwr_constraint + additional_params = ( + rates | vendor_elements | hostapd_constants.UAPSD_ENABLED | local_pwr_constraint ) config = hostapd_config.HostapdConfig(
diff --git a/src/antlion/controllers/ap_lib/third_party_ap_profiles/netgear.py b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/netgear.py similarity index 87% rename from src/antlion/controllers/ap_lib/third_party_ap_profiles/netgear.py rename to packages/antlion/controllers/ap_lib/third_party_ap_profiles/netgear.py index 25a91cd..69c1845 100644 --- a/src/antlion/controllers/ap_lib/third_party_ap_profiles/netgear.py +++ b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/netgear.py
@@ -12,16 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import utils -from antlion.controllers.ap_lib import hostapd_config -from antlion.controllers.ap_lib import hostapd_constants -from antlion.controllers.ap_lib import hostapd_utils +from antlion.controllers.ap_lib import hostapd_config, hostapd_constants, hostapd_utils +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode def netgear_r7000( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported # TODO(b/144446076): Address non-whirlwind hardware capabilities. """A simulated implementation of what a Netgear R7000 AP @@ -29,7 +31,7 @@ iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5GHz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. @@ -79,8 +81,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -138,13 +140,13 @@ hostapd_constants.AC_CAPABILITY_MAX_A_MPDU_LEN_EXP7, ] - additional_params = utils.merge_dicts( - rates, - vendor_elements, - qbss, - hostapd_constants.ENABLE_RRM_BEACON_REPORT, - hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT, - hostapd_constants.UAPSD_ENABLED, + additional_params = ( + rates + | vendor_elements + | qbss + | hostapd_constants.ENABLE_RRM_BEACON_REPORT + | hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT + | hostapd_constants.UAPSD_ENABLED ) config = hostapd_config.HostapdConfig( @@ -168,8 +170,12 @@ def netgear_wndr3400( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS on 5GHz once it is supported # TODO(b/144446076): Address non-whirlwind hardware capabilities. """A simulated implementation of what a Netgear WNDR3400 AP @@ -177,7 +183,7 @@ iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5GHz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. 
@@ -206,8 +212,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -241,9 +247,7 @@ obss_interval = None n_capabilities.append(hostapd_constants.N_CAPABILITY_HT40_PLUS) - additional_params = utils.merge_dicts( - rates, vendor_elements, hostapd_constants.UAPSD_ENABLED - ) + additional_params = rates | vendor_elements | hostapd_constants.UAPSD_ENABLED config = hostapd_config.HostapdConfig( ssid=ssid,
diff --git a/src/antlion/controllers/ap_lib/third_party_ap_profiles/securifi.py b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/securifi.py similarity index 78% rename from src/antlion/controllers/ap_lib/third_party_ap_profiles/securifi.py rename to packages/antlion/controllers/ap_lib/third_party_ap_profiles/securifi.py index 4a5bf68..8b2d0eb 100644 --- a/src/antlion/controllers/ap_lib/third_party_ap_profiles/securifi.py +++ b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/securifi.py
@@ -12,19 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import utils -from antlion.controllers.ap_lib import hostapd_config -from antlion.controllers.ap_lib import hostapd_constants -from antlion.controllers.ap_lib import hostapd_utils +from antlion.controllers.ap_lib import hostapd_config, hostapd_constants, hostapd_utils +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode -def securifi_almond(iface_wlan_2g=None, channel=None, security=None, ssid=None): +def securifi_almond( + iface_wlan_2g: str, channel: int, security: Security, ssid: str | None = None +) -> hostapd_config.HostapdConfig: """A simulated implementation of a Securifi Almond AP Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. @@ -48,13 +48,12 @@ """ if channel > 11: raise ValueError( - "The Securifi Almond does not support 5Ghz. " - "Invalid channel (%s)" % channel + f"The Securifi Almond does not support 5Ghz. Invalid channel ({channel})" ) # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) n_capabilities = [ @@ -66,9 +65,9 @@ hostapd_constants.N_CAPABILITY_DSSS_CCK_40, ] - rates = utils.merge_dicts( - hostapd_constants.CCK_AND_OFDM_BASIC_RATES, - hostapd_constants.CCK_AND_OFDM_DATA_RATES, + rates = ( + hostapd_constants.CCK_AND_OFDM_BASIC_RATES + | hostapd_constants.CCK_AND_OFDM_DATA_RATES ) # Ralink Technology IE @@ -83,7 +82,7 @@ qbss = {"bss_load_update_period": 50, "chan_util_avg_period": 600} - additional_params = utils.merge_dicts(rates, vendor_elements, qbss) + additional_params = rates | vendor_elements | qbss config = hostapd_config.HostapdConfig( ssid=ssid,
diff --git a/src/antlion/controllers/ap_lib/third_party_ap_profiles/tplink.py b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/tplink.py similarity index 85% rename from src/antlion/controllers/ap_lib/third_party_ap_profiles/tplink.py rename to packages/antlion/controllers/ap_lib/third_party_ap_profiles/tplink.py index 81eeeec..1a01303 100644 --- a/src/antlion/controllers/ap_lib/third_party_ap_profiles/tplink.py +++ b/packages/antlion/controllers/ap_lib/third_party_ap_profiles/tplink.py
@@ -12,23 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import utils -from antlion.controllers.ap_lib import hostapd_config -from antlion.controllers.ap_lib import hostapd_constants -from antlion.controllers.ap_lib import hostapd_utils +from antlion.controllers.ap_lib import hostapd_config, hostapd_constants, hostapd_utils +from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode def tplink_archerc5( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/144446076): Address non-whirlwind hardware capabilities. """A simulated implementation of an TPLink ArcherC5 AP. Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5GHz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. @@ -73,8 +75,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -120,13 +122,13 @@ hostapd_constants.AC_CAPABILITY_MAX_A_MPDU_LEN_EXP7, ] - additional_params = utils.merge_dicts( - rates, - vendor_elements, - qbss, - hostapd_constants.ENABLE_RRM_BEACON_REPORT, - hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT, - hostapd_constants.UAPSD_ENABLED, + additional_params = ( + rates + | vendor_elements + | qbss + | hostapd_constants.ENABLE_RRM_BEACON_REPORT + | hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT + | hostapd_constants.UAPSD_ENABLED ) config = hostapd_config.HostapdConfig( @@ -149,15 +151,19 @@ def tplink_archerc7( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported """A simulated implementation of an TPLink ArcherC7 AP. Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5GHz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. 
@@ -180,13 +186,13 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters rates = hostapd_constants.CCK_AND_OFDM_DATA_RATES - vht_channel_width = 80 + vht_channel_width: int | None = 80 n_capabilities = [ hostapd_constants.N_CAPABILITY_LDPC, hostapd_constants.N_CAPABILITY_SGI20, @@ -246,8 +252,8 @@ hostapd_constants.AC_CAPABILITY_TX_ANTENNA_PATTERN, ] - additional_params = utils.merge_dicts( - rates, vendor_elements, hostapd_constants.UAPSD_ENABLED, pwr_constraint + additional_params = ( + rates | vendor_elements | hostapd_constants.UAPSD_ENABLED | pwr_constraint ) config = hostapd_config.HostapdConfig( @@ -271,8 +277,12 @@ def tplink_c1200( - iface_wlan_2g=None, iface_wlan_5g=None, channel=None, security=None, ssid=None -): + iface_wlan_2g: str, + iface_wlan_5g: str, + channel: int, + security: Security, + ssid: str | None = None, +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported # TODO(b/144446076): Address non-whirlwind hardware capabilities. """A simulated implementation of an TPLink C1200 AP. @@ -280,7 +290,7 @@ iface_wlan_2g: The 2.4Ghz interface of the test AP. iface_wlan_5g: The 5GHz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. @@ -312,8 +322,8 @@ # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) hostapd_utils.verify_interface(iface_wlan_5g, hostapd_constants.INTERFACE_5G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) # Common Parameters @@ -357,12 +367,12 @@ hostapd_constants.AC_CAPABILITY_MAX_A_MPDU_LEN_EXP7, ] - additional_params = utils.merge_dicts( - rates, - vendor_elements, - hostapd_constants.ENABLE_RRM_BEACON_REPORT, - hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT, - hostapd_constants.UAPSD_ENABLED, + additional_params = ( + rates + | vendor_elements + | hostapd_constants.ENABLE_RRM_BEACON_REPORT + | hostapd_constants.ENABLE_RRM_NEIGHBOR_REPORT + | hostapd_constants.UAPSD_ENABLED ) config = hostapd_config.HostapdConfig( @@ -384,13 +394,15 @@ return config -def tplink_tlwr940n(iface_wlan_2g=None, channel=None, security=None, ssid=None): +def tplink_tlwr940n( + iface_wlan_2g: str, channel: int, security: Security, ssid: str | None = None +) -> hostapd_config.HostapdConfig: # TODO(b/143104825): Permit RIFS once it is supported """A simulated implementation of an TPLink TLWR940N AP. Args: iface_wlan_2g: The 2.4Ghz interface of the test AP. channel: What channel to use. - security: A security profile (None or WPA2). + security: A security profile (open or WPA2). ssid: The network name. Returns: A hostapd config. 
@@ -411,8 +423,8 @@ ) # Verify interface and security hostapd_utils.verify_interface(iface_wlan_2g, hostapd_constants.INTERFACE_2G_LIST) - hostapd_utils.verify_security_mode(security, [None, hostapd_constants.WPA2]) - if security: + hostapd_utils.verify_security_mode(security, [SecurityMode.OPEN, SecurityMode.WPA2]) + if security.security_mode is not SecurityMode.OPEN: hostapd_utils.verify_cipher(security, [hostapd_constants.WPA2_DEFAULT_CIPER]) n_capabilities = [ @@ -421,9 +433,9 @@ hostapd_constants.N_CAPABILITY_RX_STBC1, ] - rates = utils.merge_dicts( - hostapd_constants.CCK_AND_OFDM_BASIC_RATES, - hostapd_constants.CCK_AND_OFDM_DATA_RATES, + rates = ( + hostapd_constants.CCK_AND_OFDM_BASIC_RATES + | hostapd_constants.CCK_AND_OFDM_DATA_RATES ) # Atheros Communications, Inc. IE @@ -434,9 +446,7 @@ "0100020001" } - additional_params = utils.merge_dicts( - rates, vendor_elements, hostapd_constants.UAPSD_ENABLED - ) + additional_params = rates | vendor_elements | hostapd_constants.UAPSD_ENABLED config = hostapd_config.HostapdConfig( ssid=ssid,
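The profile functions above now require a typed Security argument instead of defaulting to None. A hedged sketch of a caller, assuming Security accepts security_mode and password keyword arguments (its constructor is not shown in this diff); interface names, channel, and SSID are placeholders:

    from antlion.controllers.ap_lib.hostapd_security import Security, SecurityMode
    from antlion.controllers.ap_lib.third_party_ap_profiles import tplink

    # Assumed constructor shape for Security; adjust to the real signature.
    wpa2 = Security(security_mode=SecurityMode.WPA2, password="p4ssphrase")

    config = tplink.tplink_archerc5(
        iface_wlan_2g="wlan0",  # must be in hostapd_constants.INTERFACE_2G_LIST
        iface_wlan_5g="wlan1",  # must be in hostapd_constants.INTERFACE_5G_LIST
        channel=6,
        security=wpa2,
        ssid="test_network",
    )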
diff --git a/src/antlion/controllers/ap_lib/wireless_network_management.py b/packages/antlion/controllers/ap_lib/wireless_network_management.py similarity index 89% rename from src/antlion/controllers/ap_lib/wireless_network_management.py rename to packages/antlion/controllers/ap_lib/wireless_network_management.py index 62ba34e..848cf5f 100644 --- a/src/antlion/controllers/ap_lib/wireless_network_management.py +++ b/packages/antlion/controllers/ap_lib/wireless_network_management.py
@@ -14,12 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, NewType, Optional +from typing import NewType from antlion.controllers.ap_lib.radio_measurement import NeighborReportElement BssTransitionCandidateList = NewType( - "BssTransitionCandidateList", List[NeighborReportElement] + "BssTransitionCandidateList", list[NeighborReportElement] ) @@ -58,9 +58,9 @@ ess_disassociation_imminent: bool = False, disassociation_timer: int = 0, validity_interval: int = 1, - bss_termination_duration: Optional[BssTerminationDuration] = None, - session_information_url: Optional[str] = None, - candidate_list: Optional[BssTransitionCandidateList] = None, + bss_termination_duration: BssTerminationDuration | None = None, + session_information_url: str | None = None, + candidate_list: BssTransitionCandidateList | None = None, ): """Create a BSS Transition Management request. @@ -128,7 +128,7 @@ return self._ess_disassociation_imminent @property - def disassociation_timer(self) -> Optional[int]: + def disassociation_timer(self) -> int | None: if self.disassociation_imminent: return self._disassociation_timer # Otherwise, field is reserved. @@ -139,13 +139,13 @@ return self._validity_interval @property - def bss_termination_duration(self) -> Optional[BssTerminationDuration]: + def bss_termination_duration(self) -> BssTerminationDuration | None: return self._bss_termination_duration @property - def session_information_url(self) -> Optional[str]: + def session_information_url(self) -> str | None: return self._session_information_url @property - def candidate_list(self) -> Optional[BssTransitionCandidateList]: + def candidate_list(self) -> BssTransitionCandidateList | None: return self._candidate_list
diff --git a/packages/antlion/controllers/attenuator.py b/packages/antlion/controllers/attenuator.py new file mode 100644 index 0000000..f9c8b97 --- /dev/null +++ b/packages/antlion/controllers/attenuator.py
@@ -0,0 +1,364 @@ +#!/usr/bin/env python3.4 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import enum +import logging +from typing import Protocol, runtime_checkable + +from antlion.libs.proc import job +from antlion.types import ControllerConfig, Json +from antlion.validation import MapValidator + +MOBLY_CONTROLLER_CONFIG_NAME: str = "Attenuator" +ACTS_CONTROLLER_REFERENCE_NAME = "attenuators" +_ATTENUATOR_OPEN_RETRIES = 3 + + +class Model(enum.StrEnum): + AEROFLEX_TELNET = "aeroflex.telnet" + MINICIRCUITS_HTTP = "minicircuits.http" + MINICIRCUITS_TELNET = "minicircuits.telnet" + + def create(self, instrument_count: int) -> AttenuatorInstrument: + match self: + case Model.AEROFLEX_TELNET: + import antlion.controllers.attenuator_lib.aeroflex.telnet + + return antlion.controllers.attenuator_lib.aeroflex.telnet.AttenuatorInstrument( + instrument_count + ) + case Model.MINICIRCUITS_HTTP: + import antlion.controllers.attenuator_lib.minicircuits.http + + return antlion.controllers.attenuator_lib.minicircuits.http.AttenuatorInstrument( + instrument_count + ) + case Model.MINICIRCUITS_TELNET: + import antlion.controllers.attenuator_lib.minicircuits.telnet + + return antlion.controllers.attenuator_lib.minicircuits.telnet.AttenuatorInstrument( + instrument_count + ) + + +def create(configs: list[ControllerConfig]) -> list[Attenuator]: + attenuators: list[Attenuator] = [] + for config in configs: + c = MapValidator(config) + attn_model = c.get(str, "Model") + protocol = c.get(str, "Protocol", "telnet") + model = Model(f"{attn_model}.{protocol}") + + instrument_count = c.get(int, "InstrumentCount") + attenuator_instrument = model.create(instrument_count) + + address = c.get(str, "Address") + port = c.get(int, "Port") + + for attempt_number in range(1, _ATTENUATOR_OPEN_RETRIES + 1): + try: + attenuator_instrument.open(address, port) + except Exception as e: + logging.error( + "Attempt %s to open connection to attenuator " "failed: %s", + attempt_number, + e, + ) + if attempt_number == _ATTENUATOR_OPEN_RETRIES: + ping_output = job.run( + f"ping {address} -c 1 -w 1", ignore_status=True + ) + if ping_output.returncode == 1: + logging.error("Unable to ping attenuator at %s", address) + else: + logging.error("Able to ping attenuator at %s", address) + job.run( + ["telnet", address, str(port)], + stdin=b"q", + ignore_status=True, + ) + raise + for i in range(instrument_count): + attenuators.append(Attenuator(attenuator_instrument, idx=i)) + return attenuators + + +def destroy(objects: list[Attenuator]) -> None: + for attn in objects: + attn.instrument.close() + + +def get_info(objects: list[Attenuator]) -> list[Json]: + """Get information on a list of Attenuator objects. + + Args: + attenuators: A list of Attenuator objects. + + Returns: + A list of dict, each representing info for Attenuator objects. 
+ """ + return [ + { + "Address": attenuator.instrument.address, + "Attenuator_Port": attenuator.idx, + } + for attenuator in objects + ] + + +def get_attenuators_for_device( + device_attenuator_configs: list[ControllerConfig], + attenuators: list[Attenuator], + attenuator_key: str, +) -> list[Attenuator]: + """Gets the list of attenuators associated to a specified device and builds + a list of the attenuator objects associated to the ip address in the + device's section of the ACTS config and the Attenuator's IP address. In the + example below the access point object has an attenuator dictionary with + IP address associated to an attenuator object. The address is the only + mandatory field and the 'attenuator_ports_wifi_2g' and + 'attenuator_ports_wifi_5g' are the attenuator_key specified above. These + can be anything and is sent in as a parameter to this function. The numbers + in the list are ports that are in the attenuator object. Below is an + standard Access_Point object and the link to a standard Attenuator object. + Notice the link is the IP address, which is why the IP address is mandatory. + + "AccessPoint": [ + { + "ssh_config": { + "user": "root", + "host": "192.168.42.210" + }, + "Attenuator": [ + { + "Address": "192.168.42.200", + "attenuator_ports_wifi_2g": [ + 0, + 1, + 3 + ], + "attenuator_ports_wifi_5g": [ + 0, + 1 + ] + } + ] + } + ], + "Attenuator": [ + { + "Model": "minicircuits", + "InstrumentCount": 4, + "Address": "192.168.42.200", + "Port": 23 + } + ] + Args: + device_attenuator_configs: A list of attenuators config information in + the acts config that are associated a particular device. + attenuators: A list of all of the available attenuators objects + in the testbed. + attenuator_key: A string that is the key to search in the device's + configuration. + + Returns: + A list of attenuator objects for the specified device and the key in + that device's config. + """ + attenuator_list = [] + for device_attenuator_config in device_attenuator_configs: + c = MapValidator(device_attenuator_config) + ports = c.list(attenuator_key).all(int) + for port in ports: + for attenuator in attenuators: + if ( + attenuator.instrument.address == device_attenuator_config["Address"] + and attenuator.idx is port + ): + attenuator_list.append(attenuator) + return attenuator_list + + +# +# Classes for accessing, managing, and manipulating attenuators. +# +# Users will instantiate a specific child class, but almost all operation should +# be performed on the methods and data members defined here in the base classes +# or the wrapper classes. +# + + +class AttenuatorError(Exception): + """Base class for all errors generated by Attenuator-related modules.""" + + +class InvalidDataError(AttenuatorError): + """ "Raised when an unexpected result is seen on the transport layer. + + When this exception is seen, closing an re-opening the link to the + attenuator instrument is probably necessary. Something has gone wrong in + the transport. + """ + + +class InvalidOperationError(AttenuatorError): + """Raised when the attenuator's state does not allow the given operation. + + Certain methods may only be accessed when the instance upon which they are + invoked is in a certain state. This indicates that the object is not in the + correct state for a method to be called. + """ + + +INVALID_MAX_ATTEN: float = 999.9 + + +@runtime_checkable +class AttenuatorInstrument(Protocol): + """Defines the primitive behavior of all attenuator instruments. 
+ + The AttenuatorInstrument class is designed to provide a simple low-level + interface for accessing any step attenuator instrument comprised of one or + more attenuators and a controller. All AttenuatorInstruments should override + all the methods below and call AttenuatorInstrument.__init__ in their + constructors. Outside of setup/teardown, devices should be accessed via + this generic "interface". + """ + + @property + def address(self) -> str | None: + """Return the address to the attenuator.""" + ... + + @property + def num_atten(self) -> int: + """Return the index used to identify this attenuator in an instrument.""" + ... + + @property + def max_atten(self) -> float: + """Return the maximum allowed attenuation value.""" + ... + + def open(self, host: str, port: int, timeout_sec: int = 5) -> None: + """Initiate a connection to the attenuator. + + Args: + host: A valid hostname to an attenuator + port: Port number to attempt connection + timeout_sec: Seconds to wait to initiate a connection + """ + ... + + def close(self) -> None: + """Close the connection to the attenuator.""" + ... + + def set_atten( + self, idx: int, value: float, strict: bool = True, retry: bool = False + ) -> None: + """Sets the attenuation given its index in the instrument. + + Args: + idx: Index used to identify a particular attenuator in an instrument + value: Value for nominal attenuation to be set + strict: If True, raise an error when given out of bounds attenuation + retry: If True, command will be retried if possible + """ + ... + + def get_atten(self, idx: int, retry: bool = False) -> float: + """Returns the current attenuation given its index in the instrument. + + Args: + idx: Index used to identify a particular attenuator in an instrument + retry: If True, command will be retried if possible + + Returns: + The current attenuation value + """ + ... + + +class Attenuator(object): + """An object representing a single attenuator in a remote instrument. + + A user wishing to abstract the mapping of attenuators to physical + instruments should use this class, which provides an object that abstracts + the physical implementation and allows the user to think only of attenuators + regardless of their location. + """ + + def __init__( + self, instrument: AttenuatorInstrument, idx: int = 0, offset: int = 0 + ) -> None: + """This is the constructor for Attenuator + + Args: + instrument: Reference to an AttenuatorInstrument on which the + Attenuator resides + idx: This zero-based index is the identifier for a particular + attenuator in an instrument. + offset: A power offset value for the attenuator to be used when + performing future operations. This could be used for either + calibration or to allow group operations with offsets between + various attenuators. + + Raises: + TypeError if an invalid AttenuatorInstrument is passed in. + IndexError if the index is out of range. + """ + if not isinstance(instrument, AttenuatorInstrument): + raise TypeError("Must provide an Attenuator Instrument Ref") + self.instrument = instrument + self.idx = idx + self.offset = offset + + if self.idx >= instrument.num_atten: + raise IndexError("Attenuator index out of range for attenuator instrument") + + def set_atten(self, value: float, strict: bool = True, retry: bool = False) -> None: + """Sets the attenuation. + + Args: + value: A floating point value for nominal attenuation to be set. 
+            strict: If True, raise an error when given an out-of-bounds
+                attenuation value; if False, out-of-bounds values are clamped
+                to 0 or max_atten.
+            retry: If True, the command will be retried if possible.
+
+        Raises:
+            ValueError if value + offset is greater than the maximum value.
+        """
+        if value + self.offset > self.instrument.max_atten and strict:
+            raise ValueError("Attenuator Value+Offset greater than Max Attenuation!")
+
+        self.instrument.set_atten(
+            self.idx, value + self.offset, strict=strict, retry=retry
+        )
+
+    def get_atten(self, retry: bool = False) -> float:
+        """Returns the attenuation as a float, normalized by the offset."""
+        return self.instrument.get_atten(self.idx, retry) - self.offset
+
+    def get_max_atten(self) -> float:
+        """Returns the max attenuation as a float, normalized by the offset."""
+        if self.instrument.max_atten == INVALID_MAX_ATTEN:
+            raise ValueError("Invalid Max Attenuator Value")
+
+        return self.instrument.max_atten - self.offset
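A short usage sketch for the controller module above, assuming a Mobly/ACTS-style testbed entry with the fields read by create(); the address and port are placeholders:

    from antlion.controllers import attenuator

    configs = [
        {
            "Model": "minicircuits",
            "Protocol": "telnet",
            "InstrumentCount": 4,
            "Address": "192.168.42.200",
            "Port": 23,
        }
    ]

    # create() opens one AttenuatorInstrument per config entry and wraps each
    # channel in an Attenuator.
    attenuators = attenuator.create(configs)

    # Channel 0, with any per-Attenuator offset applied by the wrapper.
    attenuators[0].set_atten(10.0)
    current = attenuators[0].get_atten()

    attenuator.destroy(attenuators)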
diff --git a/src/antlion/controllers/attenuator_lib/__init__.py b/packages/antlion/controllers/attenuator_lib/__init__.py similarity index 100% rename from src/antlion/controllers/attenuator_lib/__init__.py rename to packages/antlion/controllers/attenuator_lib/__init__.py
diff --git a/src/antlion/controllers/attenuator_lib/_tnhelper.py b/packages/antlion/controllers/attenuator_lib/_tnhelper.py similarity index 64% rename from src/antlion/controllers/attenuator_lib/_tnhelper.py rename to packages/antlion/controllers/attenuator_lib/_tnhelper.py index 61b4193..8ea8289 100644 --- a/src/antlion/controllers/attenuator_lib/_tnhelper.py +++ b/packages/antlion/controllers/attenuator_lib/_tnhelper.py
@@ -19,8 +19,9 @@ """ import logging -import telnetlib import re +import telnetlib + from antlion.controllers import attenuator from antlion.libs.proc import job @@ -29,40 +30,44 @@ return str(uc_string).encode("ASCII") -class _TNHelper(object): +class TelnetHelper(object): """An internal helper class for Telnet+SCPI command-based instruments. It should only be used by those implementation control libraries and not by any user code directly. """ - def __init__(self, tx_cmd_separator="\n", rx_cmd_separator="\n", prompt=""): - self._tn = None - self._ip_address = None - self._port = None + def __init__( + self, + tx_cmd_separator: str = "\n", + rx_cmd_separator: str = "\n", + prompt: str = "", + ) -> None: + self._tn: telnetlib.Telnet | None = None + self._ip_address: str | None = None + self._port: int | None = None self.tx_cmd_separator = tx_cmd_separator self.rx_cmd_separator = rx_cmd_separator self.prompt = prompt - def open(self, host, port=23): + def open(self, host: str, port: int = 23) -> None: self._ip_address = host self._port = port if self._tn: self._tn.close() - logging.debug("Telnet Server IP = %s" % host) - self._tn = telnetlib.Telnet() - self._tn.open(host, port, 10) + logging.debug("Telnet Server IP = %s", host) + self._tn = telnetlib.Telnet(host, port, timeout=10) - def is_open(self): - return bool(self._tn) + def is_open(self) -> bool: + return self._tn is not None - def close(self): + def close(self) -> None: if self._tn: self._tn.close() self._tn = None - def diagnose_telnet(self): + def diagnose_telnet(self, host: str, port: int) -> bool: """Function that diagnoses telnet connections. This function diagnoses telnet connections and can be used in case of @@ -77,32 +82,32 @@ """ logging.debug("Diagnosing telnet connection") try: - job_result = job.run("ping {} -c 5 -i 0.2".format(self._ip_address)) - except: - logging.error("Unable to ping telnet server.") + job_result = job.run(f"ping {host} -c 5 -i 0.2") + except Exception as e: + logging.error("Unable to ping telnet server: %s", e) return False - ping_output = job_result.stdout + ping_output = job_result.stdout.decode("utf-8") if not re.search(r" 0% packet loss", ping_output): - logging.error("Ping Packets Lost. Result: {}".format(ping_output)) + logging.error("Ping Packets Lost. 
Result: %s", ping_output) return False try: self.close() - except: - logging.error("Cannot close telnet connection.") + except Exception as e: + logging.error("Cannot close telnet connection: %s", e) return False try: - self.open(self._ip_address, self._port) - except: - logging.error("Cannot reopen telnet connection.") + self.open(host, port) + except Exception as e: + logging.error("Cannot reopen telnet connection: %s", e) return False logging.debug("Telnet connection likely recovered") return True - def cmd(self, cmd_str, wait_ret=True, retry=False): + def cmd(self, cmd_str: str, retry: bool = False) -> str: if not isinstance(cmd_str, str): raise TypeError("Invalid command string", cmd_str) - if not self.is_open(): + if self._tn is None or self._ip_address is None or self._port is None: raise attenuator.InvalidOperationError( "Telnet connection not open for commands" ) @@ -111,29 +116,25 @@ self._tn.read_until(_ascii_string(self.prompt), 2) self._tn.write(_ascii_string(cmd_str + self.tx_cmd_separator)) - if wait_ret is False: - return None - match_idx, match_val, ret_text = self._tn.expect( - [_ascii_string("\S+" + self.rx_cmd_separator)], 1 + [_ascii_string(f"\\S+{self.rx_cmd_separator}")], 1 ) - logging.debug("Telnet Command: {}".format(cmd_str)) - logging.debug("Telnet Reply: ({},{},{})".format(match_idx, match_val, ret_text)) + logging.debug("Telnet Command: %s", cmd_str) + logging.debug("Telnet Reply: (%s, %s, %s)", match_idx, match_val, ret_text) if match_idx == -1: - telnet_recovered = self.diagnose_telnet() + telnet_recovered = self.diagnose_telnet(self._ip_address, self._port) if telnet_recovered and retry: logging.debug("Retrying telnet command once.") - return self.cmd(cmd_str, wait_ret, retry=False) + return self.cmd(cmd_str, retry=False) else: raise attenuator.InvalidDataError( "Telnet command failed to return valid data" ) - ret_text = ret_text.decode() - ret_text = ret_text.strip( + ret_str = ret_text.decode() + ret_str = ret_str.strip( self.tx_cmd_separator + self.rx_cmd_separator + self.prompt ) - - return ret_text + return ret_str
diff --git a/src/antlion/controllers/attenuator_lib/aeroflex/__init__.py b/packages/antlion/controllers/attenuator_lib/aeroflex/__init__.py similarity index 100% rename from src/antlion/controllers/attenuator_lib/aeroflex/__init__.py rename to packages/antlion/controllers/attenuator_lib/aeroflex/__init__.py
diff --git a/packages/antlion/controllers/attenuator_lib/aeroflex/telnet.py b/packages/antlion/controllers/attenuator_lib/aeroflex/telnet.py new file mode 100644 index 0000000..f4544f3 --- /dev/null +++ b/packages/antlion/controllers/attenuator_lib/aeroflex/telnet.py
@@ -0,0 +1,136 @@ +#!/usr/bin/env python3 + +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Class for Telnet control of Aeroflex 832X and 833X Series Attenuator Modules + +This class provides a wrapper to the Aeroflex attenuator modules for purposes +of simplifying and abstracting control down to the basic necessities. It is +not the intention of the module to expose all functionality, but to allow +interchangeable HW to be used. + +See http://www.aeroflex.com/ams/weinschel/PDFILES/IM-608-Models-8320-&-8321-preliminary.pdf +""" + +from antlion.controllers import attenuator +from antlion.controllers.attenuator_lib import _tnhelper + + +class AttenuatorInstrument(attenuator.AttenuatorInstrument): + def __init__(self, num_atten: int = 0) -> None: + self._num_atten = num_atten + self._max_atten = attenuator.INVALID_MAX_ATTEN + + self._tnhelper = _tnhelper.TelnetHelper( + tx_cmd_separator="\r\n", rx_cmd_separator="\r\n", prompt=">" + ) + self._properties: dict[str, str] | None = None + self._address: str | None = None + + @property + def address(self) -> str | None: + return self._address + + @property + def num_atten(self) -> int: + return self._num_atten + + @property + def max_atten(self) -> float: + return self._max_atten + + def open(self, host: str, port: int, _timeout_sec: int = 5) -> None: + """Initiate a connection to the attenuator. + + Args: + host: A valid hostname to an attenuator + port: Port number to attempt connection + timeout_sec: Seconds to wait to initiate a connection + """ + self._tnhelper.open(host, port) + + # work around a bug in IO, but this is a good thing to do anyway + self._tnhelper.cmd("*CLS", False) + self._address = host + + if self._num_atten == 0: + self._num_atten = int(self._tnhelper.cmd("RFCONFIG? CHAN")) + + configstr = self._tnhelper.cmd("RFCONFIG? ATTN 1") + + self._properties = dict( + zip( + ["model", "max_atten", "min_step", "unknown", "unknown2", "cfg_str"], + configstr.split(", ", 5), + ) + ) + + self._max_atten = float(self._properties["max_atten"]) + + def close(self) -> None: + """Close the connection to the attenuator.""" + self._tnhelper.close() + + def set_atten( + self, idx: int, value: float, _strict: bool = True, _retry: bool = False + ) -> None: + """Sets the attenuation given its index in the instrument. + + Args: + idx: Index used to identify a particular attenuator in an instrument + value: Value for nominal attenuation to be set + strict: If True, raise an error when given out of bounds attenuation + retry: If True, command will be retried if possible + + Raises: + InvalidOperationError if the telnet connection is not open. + IndexError if the index is not valid for this instrument. + ValueError if the requested set value is greater than the maximum + attenuation value. 
+ """ + if not self._tnhelper.is_open(): + raise attenuator.InvalidOperationError("Connection not open!") + + if idx >= self._num_atten: + raise IndexError("Attenuator index out of range!", self._num_atten, idx) + + if value > self._max_atten: + raise ValueError("Attenuator value out of range!", self._max_atten, value) + + self._tnhelper.cmd(f"ATTN {idx + 1} {value}", False) + + def get_atten(self, idx: int, _retry: bool = False) -> float: + """Returns the current attenuation given its index in the instrument. + + Args: + idx: Index used to identify a particular attenuator in an instrument + retry: If True, command will be retried if possible + + Raises: + InvalidOperationError if the telnet connection is not open. + + Returns: + The current attenuation value + """ + if not self._tnhelper.is_open(): + raise attenuator.InvalidOperationError("Connection not open!") + + # Potentially redundant safety check removed for the moment + # if idx >= self.num_atten: + # raise IndexError("Attenuator index out of range!", self.num_atten, idx) + + atten_val = self._tnhelper.cmd(f"ATTN? {idx + 1}") + + return float(atten_val)
diff --git a/src/antlion/controllers/attenuator_lib/minicircuits/__init__.py b/packages/antlion/controllers/attenuator_lib/minicircuits/__init__.py similarity index 100% rename from src/antlion/controllers/attenuator_lib/minicircuits/__init__.py rename to packages/antlion/controllers/attenuator_lib/minicircuits/__init__.py
diff --git a/packages/antlion/controllers/attenuator_lib/minicircuits/http.py b/packages/antlion/controllers/attenuator_lib/minicircuits/http.py new file mode 100644 index 0000000..98118ad --- /dev/null +++ b/packages/antlion/controllers/attenuator_lib/minicircuits/http.py
@@ -0,0 +1,158 @@ +#!/usr/bin/env python3 + +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Class for HTTP control of Mini-Circuits RCDAT series attenuators + +This class provides a wrapper to the MC-RCDAT attenuator modules for purposes +of simplifying and abstracting control down to the basic necessities. It is +not the intention of the module to expose all functionality, but to allow +interchangeable HW to be used. + +See http://www.minicircuits.com/softwaredownload/Prog_Manual-6-Programmable_Attenuator.pdf +""" + +import urllib.request + +from antlion.controllers import attenuator + + +class AttenuatorInstrument(attenuator.AttenuatorInstrument): + """A specific HTTP-controlled implementation of AttenuatorInstrument for + Mini-Circuits RC-DAT attenuators. + + With the exception of HTTP-specific commands, all functionality is defined + by the AttenuatorInstrument class. + """ + + def __init__(self, num_atten: int = 1) -> None: + self._num_atten = num_atten + self._max_atten = attenuator.INVALID_MAX_ATTEN + + self._ip_address: str | None = None + self._port: int | None = None + self._timeout: int | None = None + self._address: str | None = None + + @property + def address(self) -> str | None: + return self._address + + @property + def num_atten(self) -> int: + return self._num_atten + + @property + def max_atten(self) -> float: + return self._max_atten + + def open(self, host: str, port: int = 80, timeout_sec: int = 2) -> None: + """Initiate a connection to the attenuator. + + Args: + host: A valid hostname to an attenuator + port: Port number to attempt connection + timeout_sec: Seconds to wait to initiate a connection + """ + self._ip_address = host + self._port = port + self._timeout = timeout_sec + self._address = host + + att_req = urllib.request.urlopen(f"http://{self._ip_address}:{self._port}/MN?") + config_str = att_req.read().decode("utf-8").strip() + if not config_str.startswith("MN="): + raise attenuator.InvalidDataError( + f"Attenuator returned invalid data. Attenuator returned: {config_str}" + ) + + config_str = config_str[len("MN=") :] + properties = dict( + zip(["model", "max_freq", "max_atten"], config_str.split("-", 2)) + ) + self._max_atten = float(properties["max_atten"]) + + def close(self) -> None: + """Close the connection to the attenuator.""" + # Since this controller is based on HTTP requests, there is no + # connection teardown required. + + def set_atten( + self, idx: int, value: float, strict: bool = True, retry: bool = False + ) -> None: + """Sets the attenuation given its index in the instrument. + + Args: + idx: Index used to identify a particular attenuator in an instrument + value: Value for nominal attenuation to be set + strict: If True, raise an error when given out of bounds attenuation + retry: If True, command will be retried if possible + + Raises: + InvalidDataError if the attenuator does not respond with the + expected output. 
+ """ + if not (0 <= idx < self._num_atten): + raise IndexError("Attenuator index out of range!", self._num_atten, idx) + + if value > self._max_atten and strict: + raise ValueError("Attenuator value out of range!", self._max_atten, value) + # The actual device uses one-based index for channel numbers. + adjusted_value = min(max(0, value), self._max_atten) + att_req = urllib.request.urlopen( + "http://{}:{}/CHAN:{}:SETATT:{}".format( + self._ip_address, self._port, idx + 1, adjusted_value + ), + timeout=self._timeout, + ) + att_resp = att_req.read().decode("utf-8").strip() + if att_resp != "1": + if retry: + self.set_atten(idx, value, strict, retry=False) + else: + raise attenuator.InvalidDataError( + f"Attenuator returned invalid data. Attenuator returned: {att_resp}" + ) + + def get_atten(self, idx: int, retry: bool = False) -> float: + """Returns the current attenuation of the attenuator at the given index. + + Args: + idx: The index of the attenuator. + retry: if True, command will be retried if possible + + Raises: + InvalidDataError if the attenuator does not respond with the + expected output + + Returns: + the current attenuation value as a float + """ + if not (0 <= idx < self._num_atten): + raise IndexError("Attenuator index out of range!", self._num_atten, idx) + att_req = urllib.request.urlopen( + f"http://{self._ip_address}:{self._port}/CHAN:{idx + 1}:ATT?", + timeout=self._timeout, + ) + att_resp = att_req.read().decode("utf-8").strip() + try: + return float(att_resp) + except TypeError as e: + if retry: + return self.get_atten(idx, retry=False) + + raise attenuator.InvalidDataError( + f"Attenuator returned invalid data. Attenuator returned: {att_resp}" + ) from e
diff --git a/packages/antlion/controllers/attenuator_lib/minicircuits/telnet.py b/packages/antlion/controllers/attenuator_lib/minicircuits/telnet.py new file mode 100644 index 0000000..bd70386 --- /dev/null +++ b/packages/antlion/controllers/attenuator_lib/minicircuits/telnet.py
@@ -0,0 +1,148 @@ +#!/usr/bin/env python3 + +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Class for Telnet control of Mini-Circuits RCDAT series attenuators + +This class provides a wrapper to the MC-RCDAT attenuator modules for purposes +of simplifying and abstracting control down to the basic necessities. It is +not the intention of the module to expose all functionality, but to allow +interchangeable HW to be used. + +See http://www.minicircuits.com/softwaredownload/Prog_Manual-6-Programmable_Attenuator.pdf +""" + +from antlion.controllers import attenuator +from antlion.controllers.attenuator_lib import _tnhelper + + +class AttenuatorInstrument(attenuator.AttenuatorInstrument): + """A specific telnet-controlled implementation of AttenuatorInstrument for + Mini-Circuits RC-DAT attenuators. + + With the exception of telnet-specific commands, all functionality is defined + by the AttenuatorInstrument class. Because telnet is a stateful protocol, + the functionality of AttenuatorInstrument is contingent upon a telnet + connection being established. + """ + + def __init__(self, num_atten: int = 0) -> None: + self._num_atten = num_atten + self._max_atten = attenuator.INVALID_MAX_ATTEN + self.properties: dict[str, str] | None = None + self._tnhelper = _tnhelper.TelnetHelper( + tx_cmd_separator="\r\n", rx_cmd_separator="\r\n", prompt="" + ) + self._address: str | None = None + + @property + def address(self) -> str | None: + return self._address + + @property + def num_atten(self) -> int: + return self._num_atten + + @property + def max_atten(self) -> float: + return self._max_atten + + def __del__(self) -> None: + if self._tnhelper.is_open(): + self.close() + + def open(self, host: str, port: int, _timeout_sec: int = 5) -> None: + """Initiate a connection to the attenuator. + + Args: + host: A valid hostname to an attenuator + port: Port number to attempt connection + timeout_sec: Seconds to wait to initiate a connection + """ + self._tnhelper.open(host, port) + self._address = host + + if self._num_atten == 0: + self._num_atten = 1 + + config_str = self._tnhelper.cmd("MN?") + + if config_str.startswith("MN="): + config_str = config_str[len("MN=") :] + + self.properties = dict( + zip(["model", "max_freq", "max_atten"], config_str.split("-", 2)) + ) + self._max_atten = float(self.properties["max_atten"]) + + def close(self) -> None: + """Close the connection to the attenuator.""" + self._tnhelper.close() + + def set_atten( + self, idx: int, value: float, strict: bool = True, retry: bool = False + ) -> None: + """Sets the attenuation given its index in the instrument. + + Args: + idx: Index used to identify a particular attenuator in an instrument + value: Value for nominal attenuation to be set + strict: If True, raise an error when given out of bounds attenuation + retry: If True, command will be retried if possible + + Raises: + InvalidOperationError if the telnet connection is not open. + IndexError if the index is not valid for this instrument. 
+ ValueError if the requested set value is greater than the maximum + attenuation value. + """ + + if not self._tnhelper.is_open(): + raise attenuator.InvalidOperationError("Connection not open!") + + if idx >= self._num_atten: + raise IndexError("Attenuator index out of range!", self._num_atten, idx) + + if value > self._max_atten and strict: + raise ValueError("Attenuator value out of range!", self._max_atten, value) + # The actual device uses one-based index for channel numbers. + adjusted_value = min(max(0, value), self._max_atten) + self._tnhelper.cmd(f"CHAN:{idx + 1}:SETATT:{adjusted_value}", retry=retry) + + def get_atten(self, idx: int, retry: bool = False) -> float: + """Returns the current attenuation given its index in the instrument. + + Args: + idx: Index used to identify a particular attenuator in an instrument + retry: If True, command will be retried if possible + + Returns: + The current attenuation value + + Raises: + InvalidOperationError if the telnet connection is not open. + """ + if not self._tnhelper.is_open(): + raise attenuator.InvalidOperationError("Connection not open!") + + if idx >= self._num_atten or idx < 0: + raise IndexError("Attenuator index out of range!", self._num_atten, idx) + + if self._num_atten == 1: + atten_val_str = self._tnhelper.cmd(":ATT?", retry=retry) + else: + atten_val_str = self._tnhelper.cmd(f"CHAN:{idx + 1}:ATT?", retry=retry) + atten_val = float(atten_val_str) + return atten_val
diff --git a/src/antlion/controllers/fastboot.py b/packages/antlion/controllers/fastboot.py similarity index 91% rename from src/antlion/controllers/fastboot.py rename to packages/antlion/controllers/fastboot.py index ed67245..40fa702 100755 --- a/src/antlion/controllers/fastboot.py +++ b/packages/antlion/controllers/fastboot.py
@@ -14,9 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion.libs.proc import job - from antlion import error +from antlion.libs.proc import job class FastbootError(error.ActsError): @@ -47,17 +46,19 @@ def __init__(self, serial="", ssh_connection=None): self.serial = serial if serial: - self.fastboot_str = "fastboot -s {}".format(serial) + self.fastboot_str = f"fastboot -s {serial}" else: self.fastboot_str = "fastboot" self.ssh_connection = ssh_connection def _exec_fastboot_cmd(self, name, arg_str, ignore_status=False, timeout=60): - command = " ".join((self.fastboot_str, name, arg_str)) + command = f"{self.fastboot_str} {name} {arg_str}" if self.ssh_connection: - result = self.connection.run(command, ignore_status=True, timeout=timeout) + result = self.ssh_connection.run( + command, ignore_status=True, timeout_sec=timeout + ) else: - result = job.run(command, ignore_status=True, timeout=timeout) + result = job.run(command, ignore_status=True, timeout_sec=timeout) ret, out, err = result.exit_status, result.stdout, result.stderr # TODO: This is only a temporary workaround for b/34815412. # fastboot getvar outputs to stderr instead of stdout
diff --git a/packages/antlion/controllers/fuchsia_device.py b/packages/antlion/controllers/fuchsia_device.py new file mode 100644 index 0000000..984ec3b --- /dev/null +++ b/packages/antlion/controllers/fuchsia_device.py
@@ -0,0 +1,832 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import logging +import os +import re +import socket +import textwrap +import time +from ipaddress import ip_address +from typing import Any + +import honeydew +from honeydew.affordances.connectivity.wlan.utils.types import CountryCode +from honeydew.auxiliary_devices.power_switch_dmc import ( + PowerSwitchDmc, + PowerSwitchDmcError, +) +from honeydew.transports.ffx import FfxConfig +from honeydew.typing.custom_types import DeviceInfo, IpPort +from mobly import logger, signals + +from antlion import context +from antlion import logger as acts_logger +from antlion import utils +from antlion.capabilities.ssh import DEFAULT_SSH_PORT, SSHConfig +from antlion.controllers import pdu +from antlion.controllers.fuchsia_lib.ffx import FFX +from antlion.controllers.fuchsia_lib.lib_controllers.wlan_controller import ( + WlanController, +) +from antlion.controllers.fuchsia_lib.lib_controllers.wlan_policy_controller import ( + WlanPolicyController, +) +from antlion.controllers.fuchsia_lib.package_server import PackageServer +from antlion.controllers.fuchsia_lib.sl4f import SL4F +from antlion.controllers.fuchsia_lib.ssh import ( + DEFAULT_SSH_PRIVATE_KEY, + DEFAULT_SSH_USER, + FuchsiaSSHProvider, +) +from antlion.decorators import cached_property +from antlion.runner import CalledProcessError +from antlion.types import ControllerConfig, Json +from antlion.utils import ( + PingResult, + get_fuchsia_mdns_ipv6_address, + get_interface_ip_addresses, +) +from antlion.validation import FieldNotFoundError, MapValidator + +MOBLY_CONTROLLER_CONFIG_NAME: str = "FuchsiaDevice" +ACTS_CONTROLLER_REFERENCE_NAME = "fuchsia_devices" + +FUCHSIA_RECONNECT_AFTER_REBOOT_TIME = 5 + +FUCHSIA_REBOOT_TYPE_SOFT = "soft" +FUCHSIA_REBOOT_TYPE_HARD = "hard" + +FUCHSIA_DEFAULT_CONNECT_TIMEOUT = 90 +FUCHSIA_DEFAULT_COMMAND_TIMEOUT = 60 + +FUCHSIA_DEFAULT_CLEAN_UP_COMMAND_TIMEOUT = 15 + +FUCHSIA_COUNTRY_CODE_TIMEOUT = 15 +FUCHSIA_DEFAULT_COUNTRY_CODE_US = "US" + +MDNS_LOOKUP_RETRY_MAX = 3 + +FFX_PROXY_TIMEOUT_SEC = 3 + +# Duration to wait for the Fuchsia device to acquire an IP address after +# requested to join a network. +# +# Acquiring an IP address after connecting to a WLAN network could take up to +# 15 seconds if we get unlucky: +# +# 1. An outgoing passive scan just started (~7s) +# 2. An active scan is queued for the newly saved network (~7s) +# 3. 
The initial connection attempt fails (~1s) +IP_ADDRESS_TIMEOUT = 30 + + +class FuchsiaDeviceError(signals.ControllerError): + pass + + +class FuchsiaConfigError(signals.ControllerError): + """Incorrect FuchsiaDevice configuration.""" + + +def create(configs: list[ControllerConfig]) -> list[FuchsiaDevice]: + return [FuchsiaDevice(c) for c in configs] + + +def destroy(objects: list[FuchsiaDevice]) -> None: + for fd in objects: + fd.clean_up() + del fd + + +def get_info(objects: list[FuchsiaDevice]) -> list[Json]: + """Get information on a list of FuchsiaDevice objects.""" + return [{"ip": fd.ip} for fd in objects] + + +class FuchsiaDevice: + """Class representing a Fuchsia device. + + Each object of this class represents one Fuchsia device in ACTS. + + Attributes: + ip: The full address or Fuchsia abstract name to contact the Fuchsia + device at + log: A logger object. + ssh_port: The SSH TCP port number of the Fuchsia device. + sl4f_port: The SL4F HTTP port number of the Fuchsia device. + ssh_config: The ssh_config for connecting to the Fuchsia device. + """ + + def __init__(self, controller_config: ControllerConfig) -> None: + config = MapValidator(controller_config) + self.ip = config.get(str, "ip") + if "%" in self.ip: + addr, scope_id = self.ip.split("%", 1) + try: + if_name = socket.if_indextoname(int(scope_id)) + self.ip = f"{addr}%{if_name}" + except ValueError: + # Scope ID is likely already the interface name, no change necessary. + pass + self.orig_ip = self.ip + self.sl4f_port = config.get(int, "sl4f_port", 80) + self.ssh_username = config.get(str, "ssh_username", DEFAULT_SSH_USER) + self.ssh_port = config.get(int, "ssh_port", DEFAULT_SSH_PORT) + self.ssh_binary_path = config.get(str, "ssh_binary_path", "ssh") + + def expand(path: str) -> str: + return os.path.expandvars(os.path.expanduser(path)) + + def path_from_config(name: str, default: str | None = None) -> str | None: + path = config.get(str, name, default) + return None if path is None else expand(path) + + def assert_exists(name: str, path: str | None) -> None: + if path is None: + raise FuchsiaDeviceError( + f'Please specify "${name}" in your configuration file' + ) + if not os.path.exists(path): + raise FuchsiaDeviceError( + f'Please specify a correct "${name}" in your configuration ' + f'file: "{path}" does not exist' + ) + + self.specific_image: str | None = path_from_config("specific_image") + if self.specific_image: + assert_exists("specific_image", self.specific_image) + + # Path to a tar.gz archive with pm and amber-files, as necessary for + # starting a package server. 
+ self.packages_archive_path: str | None = path_from_config( + "packages_archive_path" + ) + if self.packages_archive_path: + assert_exists("packages_archive_path", self.packages_archive_path) + + def required_path_from_config(name: str, default: str | None = None) -> str: + path = path_from_config(name, default) + if path is None: + raise FuchsiaConfigError(f"{name} is a required config field") + assert_exists(name, path) + return path + + self.ssh_priv_key: str = required_path_from_config( + "ssh_priv_key", DEFAULT_SSH_PRIVATE_KEY + ) + self.ffx_binary_path: str = required_path_from_config( + "ffx_binary_path", "${FUCHSIA_DIR}/.jiri_root/bin/ffx" + ) + self.ffx_subtools_search_path: str | None = path_from_config( + "ffx_subtools_search_path" + ) + + self.authorized_file = config.get(str, "authorized_file_loc", None) + self.serial_number = config.get(str, "serial_number", None) + self.device_type = config.get(str, "device_type", None) + self.product_type = config.get(str, "product_type", None) + self.board_type = config.get(str, "board_type", None) + self.build_number = config.get(str, "build_number", None) + self.build_type = config.get(str, "build_type", None) + self.mdns_name = config.get(str, "mdns_name", None) + + self.hard_reboot_on_fail = config.get(bool, "hard_reboot_on_fail", False) + self.take_bug_report_on_fail = config.get( + bool, "take_bug_report_on_fail", False + ) + self.device_pdu_config = config.get(dict, "PduDevice", {}) + self.config_country_code = config.get( + str, "country_code", FUCHSIA_DEFAULT_COUNTRY_CODE_US + ).upper() + + output_path = context.get_current_context().get_base_output_path() + self.ssh_config = os.path.join(output_path, f"ssh_config_{self.ip}") + self._generate_ssh_config(self.ssh_config) + + # WLAN interface info is populated inside configure_wlan + self.wlan_client_interfaces: dict[str, Any] = {} + self.wlan_ap_interfaces: dict[str, Any] = {} + self.wlan_client_test_interface_name = config.get( + str, "wlan_client_test_interface", None + ) + self.wlan_ap_test_interface_name = config.get( + str, "wlan_ap_test_interface", None + ) + try: + self.wlan_features: list[str] = config.list("wlan_features").all(str) + except FieldNotFoundError: + self.wlan_features = [] + + # Whether to use 'policy' or 'drivers' for WLAN connect/disconnect calls + # If set to None, wlan is not configured. + self.association_mechanism: str | None = None + # Defaults to policy layer, unless otherwise specified in the config + self.default_association_mechanism = config.get( + str, "association_mechanism", "policy" + ) + + # Whether to clear and preserve existing saved networks and client + # connections state, to be restored at device teardown. + self.default_preserve_saved_networks = config.get( + bool, "preserve_saved_networks", True + ) + + if not utils.is_valid_ipv4_address(self.ip) and not utils.is_valid_ipv6_address( + self.ip + ): + mdns_ip = None + for _ in range(MDNS_LOOKUP_RETRY_MAX): + mdns_ip = get_fuchsia_mdns_ipv6_address(self.ip) + if mdns_ip: + break + else: + time.sleep(1) + if mdns_ip and utils.is_valid_ipv6_address(mdns_ip): + # self.ip was actually an mdns name. Use it for self.mdns_name + # unless one was explicitly provided. 
+ self.mdns_name = self.mdns_name or self.ip + self.ip = mdns_ip + else: + raise ValueError(f"Invalid IP: {self.ip}") + + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[FuchsiaDevice | {self.orig_ip}]", + }, + ) + + self.ping_rtt_match = re.compile( + r"RTT Min/Max/Avg = \[ ([0-9.]+) / ([0-9.]+) / ([0-9.]+) \] ms" + ) + self.serial = re.sub("[.:%]", "_", self.ip) + self.package_server: PackageServer | None = None + + # Create honeydew fuchsia_device. + if not self.mdns_name: + raise FuchsiaConfigError( + 'Must provide "mdns_name: <device mDNS name>" in the device config' + ) + + ffx_config = FfxConfig() + ffx_config.setup( + binary_path=self.ffx_binary_path, + isolate_dir=None, + logs_dir=f"{logging.log_path}/ffx/", # type: ignore[attr-defined] + logs_level="None", + enable_mdns=False, + subtools_search_path=self.ffx_subtools_search_path, + proxy_timeout_secs=FFX_PROXY_TIMEOUT_SEC, + ) + + self.honeydew_fd = honeydew.create_device( + device_info=DeviceInfo( + name=self.mdns_name, + ip_port=IpPort(ip_address(self.ip), None), + serial_socket=None, + ), + ffx_config=ffx_config.get_config(), + config={ + "affordances": { + "wlan": { + "implementation": "fuchsia-controller", + }, + }, + }, + ) + + @cached_property + def sl4f(self) -> SL4F: + """Get the sl4f module configured for this device.""" + self.log.info("Started SL4F server") + return SL4F(self.ssh, self.sl4f_port) + + @cached_property + def ssh(self) -> FuchsiaSSHProvider: + """Get the SSH provider module configured for this device.""" + if not self.ssh_port: + raise FuchsiaConfigError( + 'Must provide "ssh_port: <int>" in the device config' + ) + if not self.ssh_priv_key: + raise FuchsiaConfigError( + 'Must provide "ssh_priv_key: <file path>" in the device config' + ) + return FuchsiaSSHProvider( + SSHConfig( + self.ssh_username, + self.ip, + self.ssh_priv_key, + port=self.ssh_port, + ssh_binary=self.ssh_binary_path, + ) + ) + + @cached_property + def ffx(self) -> FFX: + """Get the ffx module configured for this device. + + The ffx module uses lazy-initialization; it will initialize an ffx + connection to the device when it is required. + + If ffx needs to be reinitialized, delete the "ffx" property and attempt + access again. Note re-initialization will interrupt any running ffx + calls. + """ + if not self.mdns_name: + raise FuchsiaConfigError( + 'Must provide "mdns_name: <device mDNS name>" in the device config' + ) + return FFX( + self.ffx_binary_path, + self.mdns_name, + self.ip, + self.ssh_priv_key, + self.ffx_subtools_search_path, + ) + + @ffx.deleter + # TODO(https://github.com/python/mypy/issues/11008): Rename to ffx + def ffx_deleter(self, ffx: FFX) -> None: + self.log.debug("Cleaning up ffx") + ffx.clean_up() + + @cached_property + def wlan_policy_controller(self) -> WlanPolicyController: + return WlanPolicyController(self.honeydew_fd, self.ssh) + + @cached_property + def wlan_controller(self) -> WlanController: + return WlanController(self.honeydew_fd) + + def _generate_ssh_config(self, file_path: str) -> None: + """Generate and write an SSH config for Fuchsia to disk. 
+ + Args: + file_path: Path to write the generated SSH config + """ + content = textwrap.dedent( + f"""\ + Host * + CheckHostIP no + StrictHostKeyChecking no + ForwardAgent no + ForwardX11 no + GSSAPIDelegateCredentials no + UserKnownHostsFile /dev/null + User fuchsia + IdentitiesOnly yes + IdentityFile {self.ssh_priv_key} + ControlPersist yes + ControlMaster auto + ControlPath /tmp/fuchsia--%r@%h:%p + ServerAliveInterval 1 + ServerAliveCountMax 1 + LogLevel ERROR + """ + ) + + with open(file_path, "w", encoding="utf-8") as file: + file.write(content) + + def start_package_server(self) -> None: + if not self.packages_archive_path: + self.log.warn( + "packages_archive_path is not specified. " + "Assuming a package server is already running and configured on " + "the DUT. If this is not the case, either run your own package " + "server, or configure these fields appropriately. " + "This is usually required for the Fuchsia iPerf3 client or " + "other testing utilities not on device cache." + ) + return + if self.package_server: + self.log.warn( + "Skipping to start the package server since is already running" + ) + return + + self.package_server = PackageServer(self.packages_archive_path) + self.package_server.start() + self.package_server.configure_device(self.ssh) + + def update_wlan_interfaces(self) -> None: + """Retrieves WLAN interfaces from device and sets the FuchsiaDevice + attributes. + """ + wlan_interfaces = self.wlan_controller.get_interfaces_by_role() + self.wlan_client_interfaces = wlan_interfaces.client + self.wlan_ap_interfaces = wlan_interfaces.ap + + # Set test interfaces to value from config, else the first found + # interface, else None + if self.wlan_client_test_interface_name is None: + self.wlan_client_test_interface_name = next( + iter(self.wlan_client_interfaces), None + ) + + if self.wlan_ap_test_interface_name is None: + self.wlan_ap_test_interface_name = next(iter(self.wlan_ap_interfaces), None) + + def configure_wlan( + self, + association_mechanism: str | None = None, + preserve_saved_networks: bool | None = None, + ) -> None: + """ + Readies device for WLAN functionality. If applicable, connects to the + policy layer and clears/saves preexisting saved networks. + + Args: + association_mechanism: either 'policy' or 'drivers'. If None, uses + the default value from init (can be set by ACTS config) + preserve_saved_networks: whether to clear existing saved + networks, and preserve them for restoration later. If None, uses + the default value from init (can be set by ACTS config) + + Raises: + FuchsiaDeviceError, if configuration fails + """ + self.wlan_controller.set_country_code(CountryCode(self.config_country_code)) + + # If args aren't provided, use the defaults, which can be set in the + # config. 
+ if association_mechanism is None: + association_mechanism = self.default_association_mechanism + if preserve_saved_networks is None: + preserve_saved_networks = self.default_preserve_saved_networks + + if association_mechanism not in {None, "policy", "drivers"}: + raise FuchsiaDeviceError( + f"Invalid FuchsiaDevice association_mechanism: {association_mechanism}" + ) + + # Allows for wlan to be set up differently in different tests + if self.association_mechanism: + self.log.info("Deconfiguring WLAN") + self.deconfigure_wlan() + + self.association_mechanism = association_mechanism + + self.log.info( + "Configuring WLAN w/ association mechanism: " f"{association_mechanism}" + ) + if association_mechanism == "drivers": + self.log.warn( + "You may encounter unusual device behavior when using the " + "drivers directly for WLAN. This should be reserved for " + "debugging specific issues. Normal test runs should use the " + "policy layer." + ) + if preserve_saved_networks: + self.log.warn( + "Unable to preserve saved networks when using drivers " + "association mechanism (requires policy layer control)." + ) + else: + # This requires SL4F calls, so it can only happen with actual + # devices, not with unit tests. + self.wlan_policy_controller.configure_wlan(preserve_saved_networks) + + # Retrieve WLAN client and AP interfaces + self.update_wlan_interfaces() + + def deconfigure_wlan(self) -> None: + """ + Stops WLAN functionality (if it has been started). Used to allow + different tests to use WLAN differently (e.g. some tests require using + wlan policy, while the abstract wlan_device can be setup to use policy + or drivers) + + Raises: + FuchsiaDeviceError, if deconfigure fails. + """ + if not self.association_mechanism: + self.log.warning("WLAN not configured before deconfigure was called.") + return + # If using policy, stop client connections. Otherwise, just clear + # variables. + if self.association_mechanism != "drivers": + self.wlan_policy_controller._deconfigure_wlan() + self.association_mechanism = None + + def reboot( + self, + unreachable_timeout: int = FUCHSIA_DEFAULT_CONNECT_TIMEOUT, + reboot_type: str = FUCHSIA_REBOOT_TYPE_SOFT, + testbed_pdus: list[pdu.PduDevice] | None = None, + ) -> None: + """Reboot a FuchsiaDevice. + + Soft reboots the device, verifies it becomes unreachable, then verifies + it comes back online. Re-initializes services so the tests can continue. + + Args: + use_ssh: if True, use fuchsia shell command via ssh to reboot + instead of SL4F. + unreachable_timeout: time to wait for device to become unreachable. + reboot_type: 'soft' or 'hard'. + testbed_pdus: all testbed PDUs. + + Raises: + ConnectionError, if device fails to become unreachable or fails to + come back up. + """ + if reboot_type == FUCHSIA_REBOOT_TYPE_SOFT: + self.log.info("Soft rebooting") + self.honeydew_fd.reboot() + + elif reboot_type == FUCHSIA_REBOOT_TYPE_HARD: + self.log.info("Hard rebooting via PDU") + + # Use dmc (client of DMS, device management server) if available + # for rebooting the device. This tool is only available when + # running in Fuchsia infrastructure. 
+ dmc: PowerSwitchDmc | None = None + if self.mdns_name: + try: + dmc = PowerSwitchDmc(device_name=self.mdns_name) + except PowerSwitchDmcError: + self.log.info("dmc not found, falling back to using PDU") + + if dmc: + self.log.info("Killing power to FuchsiaDevice with dmc") + dmc.power_off() + self.honeydew_fd.wait_for_offline() + + self.log.info("Restoring power to FuchsiaDevice with dmc") + dmc.power_on() + self.honeydew_fd.wait_for_online() + self.honeydew_fd.on_device_boot() + else: + # Find the matching PDU in the Mobly config. + if not testbed_pdus: + raise AttributeError( + "Testbed PDUs must be supplied to hard reboot a fuchsia_device." + ) + device_pdu, device_pdu_port = pdu.get_pdu_port_for_device( + self.device_pdu_config, testbed_pdus + ) + + self.log.info("Killing power to FuchsiaDevice") + device_pdu.off(device_pdu_port) + self.honeydew_fd.wait_for_offline() + + self.log.info("Restoring power to FuchsiaDevice") + device_pdu.on(device_pdu_port) + self.honeydew_fd.wait_for_online() + self.honeydew_fd.on_device_boot() + + else: + raise ValueError(f"Invalid reboot type: {reboot_type}") + + # Cleanup services + self.stop_services() + + # TODO(http://b/246852449): Move configure_wlan to other controllers. + # If wlan was configured before reboot, it must be configured again + # after rebooting, as it was before reboot. No preserving should occur. + if self.association_mechanism: + pre_reboot_association_mechanism = self.association_mechanism + # Prevent configure_wlan from thinking it needs to deconfigure first + self.association_mechanism = None + self.configure_wlan( + association_mechanism=pre_reboot_association_mechanism, + preserve_saved_networks=False, + ) + + self.log.info("Device has rebooted") + + def ping( + self, + dest_ip: str, + count: int = 3, + interval: int = 1000, + timeout: int = 1000, + size: int = 25, + additional_ping_params: str | None = None, + ) -> PingResult: + """Pings from a Fuchsia device to an IPv4 address or hostname + + Args: + dest_ip: (str) The ip or hostname to ping. + count: (int) How many icmp packets to send. + interval: (int) How long to wait between pings (ms) + timeout: (int) How long to wait before having the icmp packet + timeout (ms). + size: (int) Size of the icmp packet. + additional_ping_params: (str) command option flags to + append to the command string + + Returns: + A PingResult describing the outcome of the ping, including: + exit_status: Exit status of the ping command (0 on success). + rtt_min_ms: The minimum round trip time of the ping. + rtt_avg_ms: The average round trip time of the ping. + rtt_max_ms: The maximum round trip time of the ping. + stdout: The standard out of the ping command. + stderr: The standard error of the ping command.
+ """ + self.log.debug(f"Pinging {dest_ip}...") + if not additional_ping_params: + additional_ping_params = "" + + try: + ping_result = self.ssh.run( + f"ping -c {count} -i {interval} -t {timeout} -s {size} " + f"{additional_ping_params} {dest_ip}" + ) + except CalledProcessError as e: + self.log.debug(f"Failed to ping from host: {e}") + return PingResult( + exit_status=e.returncode, + stdout=e.stdout.decode("utf-8"), + stderr=e.stderr.decode("utf-8"), + transmitted=None, + received=None, + time_ms=None, + rtt_min_ms=None, + rtt_avg_ms=None, + rtt_max_ms=None, + rtt_mdev_ms=None, + ) + + rtt_stats: re.Match[str] | None = None + + if not ping_result.stderr: + rtt_lines = ping_result.stdout.decode("utf-8").split("\n")[:-1] + rtt_line = rtt_lines[-1] + rtt_stats = re.search(self.ping_rtt_match, rtt_line) + if rtt_stats is None: + raise FuchsiaDeviceError(f'Unable to parse ping output: "{rtt_line}"') + + return PingResult( + exit_status=ping_result.returncode, + stdout=ping_result.stdout.decode("utf-8"), + stderr=ping_result.stderr.decode("utf-8"), + transmitted=None, + received=None, + time_ms=None, + rtt_min_ms=float(rtt_stats.group(1)) if rtt_stats else None, + rtt_avg_ms=float(rtt_stats.group(3)) if rtt_stats else None, + rtt_max_ms=float(rtt_stats.group(2)) if rtt_stats else None, + rtt_mdev_ms=None, + ) + + def clean_up(self) -> None: + """Cleans up the FuchsiaDevice object, releases any resources it + claimed, and restores saved networks if applicable. For reboots, use + clean_up_services only. + + Note: Any exceptions thrown in this method must be caught and handled, + ensuring that clean_up_services is run. Otherwise, the syslog listening + thread will never join and will leave tests hanging. + """ + # If and only if wlan is configured, and using the policy layer + if self.association_mechanism == "policy": + try: + self.wlan_policy_controller.clean_up() + except Exception as err: + self.log.warning(f"Unable to clean up WLAN Policy layer: {err}") + + self.stop_services() + + if self.package_server: + self.package_server.clean_up() + + def get_interface_ip_addresses(self, interface: str) -> dict[str, list[str]]: + return get_interface_ip_addresses(self, interface) + + def wait_for_ipv4_addr(self, interface: str) -> None: + """Checks if device has an ipv4 private address. Sleeps 1 second between + retries. + + Args: + interface: name of interface from which to get ipv4 address. + + Raises: + ConnectionError, if device does not have an ipv4 address after all + timeout. + """ + self.log.info( + f"Checking for valid ipv4 addr. Retry {IP_ADDRESS_TIMEOUT} seconds." + ) + timeout = time.time() + IP_ADDRESS_TIMEOUT + while time.time() < timeout: + ip_addrs = self.get_interface_ip_addresses(interface) + + if len(ip_addrs["ipv4_private"]) > 0: + self.log.info( + "Device has an ipv4 address: " f"{ip_addrs['ipv4_private'][0]}" + ) + break + else: + self.log.debug( + "Device does not yet have an ipv4 address...retrying in 1 " + "second." + ) + time.sleep(1) + else: + raise ConnectionError("Device failed to get an ipv4 address.") + + def wait_for_ipv6_addr(self, interface: str) -> None: + """Checks if device has an ipv6 private local address. Sleeps 1 second + between retries. + + Args: + interface: name of interface from which to get ipv6 address. + + Raises: + ConnectionError, if device does not have an ipv6 address after all + timeout. + """ + self.log.info( + f"Checking for valid ipv6 addr. Retry {IP_ADDRESS_TIMEOUT} seconds." 
+ ) + timeout = time.time() + IP_ADDRESS_TIMEOUT + while time.time() < timeout: + ip_addrs = self.get_interface_ip_addresses(interface) + if len(ip_addrs["ipv6_private_local"]) > 0: + self.log.info( + "Device has an ipv6 private local address: " + f"{ip_addrs['ipv6_private_local'][0]}" + ) + break + else: + self.log.debug( + "Device does not yet have an ipv6 address...retrying in 1 " + "second." + ) + time.sleep(1) + else: + raise ConnectionError("Device failed to get an ipv6 address.") + + def stop_services(self) -> None: + """Stops all host-side clients to the Fuchsia device. + + This is necessary whenever the device's state is unknown. These cases can be + found after device reboots, for example. + """ + self.log.info("Stopping host device services.") + del self.wlan_policy_controller + del self.wlan_controller + del self.sl4f + del self.ssh + del self.ffx + + def take_bug_report(self) -> None: + """Takes a bug report on the device and stores it in a file.""" + self.log.info(f"Taking snapshot of {self.mdns_name}") + + time_stamp = acts_logger.normalize_log_line_timestamp( + acts_logger.epoch_to_log_line_timestamp(utils.get_current_epoch_time()) + ) + out_dir = context.get_current_context().get_full_output_path() + out_path = os.path.join(out_dir, f"{self.mdns_name}_{time_stamp}.zip") + + try: + with open(out_path, "wb") as file: + snapshot_bytes = self.ssh.run("snapshot", log_output=False).stdout + file.write(snapshot_bytes) + self.log.info(f"Snapshot saved to {out_path}") + except Exception as err: + self.log.error(f"Failed to take snapshot: {err}") + + def take_bt_snoop_log(self, custom_name: str | None = None) -> None: + """Takes the bt-snoop log from the device and stores it in a file + in pcap format. + """ + bt_snoop_path = context.get_current_context().get_full_output_path() + time_stamp = acts_logger.normalize_log_line_timestamp( + acts_logger.epoch_to_log_line_timestamp(time.time()) + ) + out_name = "FuchsiaDevice%s_%s" % ( + self.serial, + time_stamp.replace(" ", "_").replace(":", "-"), + ) + if custom_name: + out_name = f"{self.serial}_{custom_name}.pcap" + else: + out_name = f"{out_name}.pcap" + full_out_path = os.path.join(bt_snoop_path, out_name) + with open(full_out_path, "wb") as file: + pcap_bytes = self.ssh.run("bt-snoop-cli -d -f pcap").stdout + file.write(pcap_bytes)
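For orientation, the following is a minimal sketch (not part of the patch) of how a test might drive the FuchsiaDevice API shown above. It assumes `fd` is a FuchsiaDevice instance already created by the antlion controller machinery; the address and argument values are placeholders.

def example_wlan_round_trip(fd) -> None:
    # Bring up the WLAN policy layer, preserving any networks already saved
    # on the device so clean_up() can restore them later.
    fd.configure_wlan(association_mechanism="policy", preserve_saved_networks=True)

    # Ping the AP; PingResult carries the exit status and parsed RTT stats.
    result = fd.ping("192.168.1.1", count=5, size=64)
    if result.exit_status == 0 and result.rtt_avg_ms is not None:
        fd.log.info(f"average RTT: {result.rtt_avg_ms} ms")

    # Soft reboot; the controller re-applies the previous WLAN configuration.
    fd.reboot(reboot_type="soft")

    # Release host-side services and restore preserved networks.
    fd.clean_up()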
diff --git a/src/antlion/controllers/fuchsia_lib/OWNERS b/packages/antlion/controllers/fuchsia_lib/OWNERS similarity index 100% rename from src/antlion/controllers/fuchsia_lib/OWNERS rename to packages/antlion/controllers/fuchsia_lib/OWNERS
diff --git a/src/antlion/controllers/fuchsia_lib/__init__.py b/packages/antlion/controllers/fuchsia_lib/__init__.py similarity index 100% rename from src/antlion/controllers/fuchsia_lib/__init__.py rename to packages/antlion/controllers/fuchsia_lib/__init__.py
diff --git a/src/antlion/controllers/fuchsia_lib/base_lib.py b/packages/antlion/controllers/fuchsia_lib/base_lib.py similarity index 77% rename from src/antlion/controllers/fuchsia_lib/base_lib.py rename to packages/antlion/controllers/fuchsia_lib/base_lib.py index ea7f96e..1171d98 100644 --- a/src/antlion/controllers/fuchsia_lib/base_lib.py +++ b/packages/antlion/controllers/fuchsia_lib/base_lib.py
@@ -15,12 +15,11 @@ # limitations under the License. import json -import socket - +import logging from typing import Any, Mapping from urllib.request import Request, urlopen -from antlion import logger +from mobly.logger import PrefixLoggerAdapter DEFAULT_SL4F_RESPONSE_TIMEOUT_SEC = 30 @@ -36,16 +35,19 @@ class BaseLib: def __init__(self, addr: str, logger_tag: str) -> None: self.address = addr - self.log = logger.create_tagged_trace_logger( - f"SL4F | {self.address} | {logger_tag}" + self.log = PrefixLoggerAdapter( + logging.getLogger(), + { + PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"SL4F | {self.address} | {logger_tag}" + }, ) def send_command( self, cmd: str, - args: Mapping[str, Any], - response_timeout: int = DEFAULT_SL4F_RESPONSE_TIMEOUT_SEC, - ) -> Mapping[str, Any]: + args: Mapping[str, object] | None = None, + response_timeout: float = DEFAULT_SL4F_RESPONSE_TIMEOUT_SEC, + ) -> dict[str, Any]: """Builds and sends a JSON command to SL4F server. Args: @@ -74,18 +76,14 @@ data=data_json, headers={ "Content-Type": "application/json; charset=utf-8", - "Content-Length": len(data_json), + "Content-Length": str(len(data_json)), }, ) - self.log.debug(f'Sending request "{cmd}" with {args}') - try: - response = urlopen(req, timeout=response_timeout) - except socket.timeout as e: - # socket.timeout was aliased to TimeoutError in Python 3.10. For - # older versions of Python, we need to cast to TimeoutError to - # provide a version-agnostic API. - raise TimeoutError("socket timeout") from e + self.log.debug( + f'Sending request "{cmd}" with args: {args} with timeout {response_timeout}' + ) + response = urlopen(req, timeout=response_timeout) response_body = response.read().decode("utf-8") try:
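To illustrate the new BaseLib surface, a library subclasses BaseLib and routes its calls through send_command, which now accepts optional args and returns the decoded JSON response. The facade and command names below are hypothetical and used only for illustration; they are not part of this patch.

from antlion.controllers.fuchsia_lib.base_lib import BaseLib

class ExampleFacade(BaseLib):
    """Hypothetical SL4F facade used only to illustrate send_command."""

    def __init__(self, addr: str) -> None:
        super().__init__(addr, logger_tag="example")

    def get_status(self) -> dict[str, object]:
        # send_command JSON-encodes the arguments, POSTs them to the SL4F
        # server at self.address, and returns the parsed response body.
        return self.send_command("example_facade.GetStatus", {"verbose": True})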
diff --git a/packages/antlion/controllers/fuchsia_lib/ffx.py b/packages/antlion/controllers/fuchsia_lib/ffx.py new file mode 100644 index 0000000..6087c54 --- /dev/null +++ b/packages/antlion/controllers/fuchsia_lib/ffx.py
@@ -0,0 +1,447 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +import io +import json +import logging +import os +import subprocess +import tempfile +import time +from pathlib import Path, PurePath +from shutil import rmtree + +from mobly import logger, signals +from tenacity import retry +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +from antlion import context, utils + +FFX_DEFAULT_COMMAND_TIMEOUT: int = 60 +FFX_CONFIG_TIMEOUT_SEC: float = 20 +FFX_TARGET_ADD_TIMEOUT_SEC: float = 20 +FFX_DAEMON_STOP_TIMEOUT_SEC: float = 4 + + +class FFXError(signals.TestError): + """Non-zero error code returned from a ffx command.""" + + def __init__(self, command: str, process: subprocess.CalledProcessError) -> None: + self.command = command + self.stdout: str = process.stdout.decode("utf-8", errors="replace") + self.stderr: str = process.stderr.decode("utf-8", errors="replace") + self.exit_status = process.returncode + super().__init__(self.__str__()) + + def __str__(self) -> str: + return f'ffx subcommand "{self.command}" returned {self.exit_status}, stdout: "{self.stdout}", stderr: "{self.stderr}"' + + +class FFXTimeout(signals.TestError): + """Timed out running a ffx command.""" + + +class OutputFormat(enum.StrEnum): + TEXT = "text" + JSON = "json" + JSON_PRETTY = "json-pretty" + + +class FFX: + """Device-specific controller for the ffx tool. + + Attributes: + log: Logger for the device-specific instance of ffx. + binary_path: Path to the ffx binary. + mdns_name: mDNS nodename of the default Fuchsia target. + ip: IP address of the default Fuchsia target. + ssh_private_key_path: Path to Fuchsia DUT SSH private key. + """ + + def __init__( + self, + binary_path: str, + mdns_name: str, + ip: str | None = None, + ssh_private_key_path: str | None = None, + subtools_search_path: str | None = None, + ): + """ + Args: + binary_path: Path to ffx binary. + target: Fuchsia mDNS nodename of default target. + ssh_private_key_path: Path to SSH private key for talking to the + Fuchsia DUT. 
+ """ + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[ffx | {mdns_name}]", + }, + ) + self._binary_path = binary_path + self._mdns_name = mdns_name + self._ip = ip + self._ssh_private_key_path = ssh_private_key_path + self._subtools_search_path = subtools_search_path + + self._daemon: subprocess.Popen | None = None + self._daemon_log: io.TextIOWrapper | None = None + self._isolate_dir: str | None = None + self._sock_dir: str | None = None + self._ssh_auth_sock_path: str | None = None + self._overnet_socket_path: str | None = None + self._has_been_reachable = False + self._has_logged_version = False + + def clean_up(self) -> None: + self._stop_daemon() + self._has_been_reachable = False + self._has_logged_version = False + + def run( + self, + command: list[str], + timeout_sec: float = FFX_DEFAULT_COMMAND_TIMEOUT, + skip_status_code_check: bool = False, + skip_reachability_check: bool = False, + output_format: OutputFormat = OutputFormat.TEXT, + ) -> subprocess.CompletedProcess: + """Runs an ffx command. + + Verifies reachability before running, if it hasn't already. + + Args: + command: Command to run with ffx. + timeout_sec: Seconds to wait for a command to complete. + skip_status_code_check: Whether to check for the status code. + verify_reachable: Whether to verify reachability before running. + output_format: Desired output format; useful for parsing output. + + Raises: + FFXTimeout: when the command times out. + FFXError: when the command returns non-zero and skip_status_code_check is False. + + Returns: + The results of the command. Note subprocess.CompletedProcess returns + stdout and stderr as a byte-array, not a string. Treat these members + as such or convert to a string using bytes.decode('utf-8'). + """ + if not self._daemon: + self._start_daemon() + if not self._has_been_reachable and not skip_reachability_check: + self.log.info(f'Verifying reachability before running "{command}"') + self.verify_reachable() + return self._exec( + command, + timeout_sec, + check=not skip_status_code_check, + output_format=output_format, + ) + + def _exec( + self, + command: list[str], + timeout_sec: float, + check: bool = True, + output_format: OutputFormat = OutputFormat.TEXT, + ) -> subprocess.CompletedProcess[bytes]: + """Execute a ffx command without any other arguments. + + Args: + command: Command to run with ffx. + timeout_sec: Seconds to wait for a command to complete. + check: Whether to check for the status code. + + Raises: + FFXTimeout: when the command times out. + FFXError: when the command returns non-zero and skip_status_code_check is False. + + Returns: + The results of the command. Note subprocess.CompletedProcess returns + stdout and stderr as a byte-array, not a string. Treat these members + as such or convert to a string using bytes.decode('utf-8'). 
+ """ + if not self._isolate_dir: + raise TypeError( + f"Expected _isolate_dir to be a str, got {type(self._isolate_dir)}" + ) + + self.log.debug(f'Running "{" ".join(command)}".') + + full_command = [self._binary_path, "--isolate-dir", self._isolate_dir] + match output_format: + case OutputFormat.TEXT: + full_command += command + case OutputFormat.JSON: + full_command += ["--machine", "json"] + command + case OutputFormat.JSON_PRETTY: + full_command += ["--machine", "json-pretty"] + command + + try: + result = subprocess.run( + full_command, + capture_output=True, + timeout=timeout_sec, + check=check, + ) + self.log.debug( + f'Result of "{" ".join(command)}":\n' + f'stdout: {result.stdout.decode("utf-8", errors="replace")}\n' + f'stderr: {result.stderr.decode("utf-8", errors="replace")}' + ) + return result + except subprocess.CalledProcessError as e: + raise FFXError(" ".join(command), e) from e + except subprocess.TimeoutExpired as e: + raise FFXTimeout(f'Timed out running "{" ".join(command)}"') from e + + def _start_daemon(self) -> None: + """Create a new isolated environment for ffx. + + This is needed to avoid overlapping ffx daemons while testing in + parallel, causing the ffx invocations to “upgrade” one daemon to + another, which appears as a flap/restart to another test. + """ + # Store ffx files in a unique directory. Timestamp is used to prevent + # files from being overwritten in the case when a test intentionally + # reboots or resets the device such that a new isolated ffx environment + # is created. + root_dir = context.get_current_context().get_full_output_path() + epoch = utils.get_current_epoch_time() + time_stamp = logger.normalize_log_line_timestamp( + logger.epoch_to_log_line_timestamp(epoch) + ) + self._isolate_dir = os.path.join(root_dir, f"{self._mdns_name}_{time_stamp}") + os.makedirs(self._isolate_dir, exist_ok=True) + + # Sockets need to be created in a different directory to be guaranteed + # to stay under the maximum socket path length of 104 characters. + # See https://unix.stackexchange.com/q/367008 + self._sock_dir = tempfile.mkdtemp() + # On MacOS, the socket paths need to be just paths (not pre-created + # Python tempfiles, which are not socket files). + self._ssh_auth_sock_path = str(PurePath(self._sock_dir, "ssh_auth_sock")) + self._overnet_socket_path = str(PurePath(self._sock_dir, "overnet_socket")) + + cmds = [ + [ + "config", + "set", + "log.dir", + os.path.join(self._isolate_dir, "ffx_logs"), + ], + ["config", "set", "log.level", "debug"], + ["config", "set", "target.default", self._mdns_name], + # Use user-specific and device-specific locations for sockets. + # Avoids user permission errors in a multi-user test environment. + # Avoids daemon upgrades when running tests in parallel in a CI + # environment. + ["config", "set", "ssh.auth-sock", self._ssh_auth_sock_path], + ["config", "set", "overnet.socket", self._overnet_socket_path], + # Alias to disable metrics, device discovery, device auto connection, etc. + ["config", "set", "ffx.isolated", "true"], + # Control the daemon's lifecycle directly + ["config", "set", "daemon.autostart", "false"], + ] + + if not self._ip: + cmds.append(["config", "set", "discovery.mdns.enabled", "true"]) + + # ffx looks for the private key in several default locations. For + # testbeds which have the private key in another location, set it now. 
+ if self._ssh_private_key_path: + cmds.append( + [ + "config", + "set", + "ssh.priv", + f'["{self._ssh_private_key_path}"]', + ] + ) + + if self._subtools_search_path: + cmds.append( + [ + "config", + "set", + "ffx.subtool-search-paths", + self._subtools_search_path, + ] + ) + + for cmd in cmds: + self._exec(cmd, FFX_CONFIG_TIMEOUT_SEC) + + self._daemon_log = open( + os.path.join(self._isolate_dir, "daemon.log"), + "a+", + encoding="utf-8", + ) + + # Start the daemon + self._daemon = subprocess.Popen( + [ + self._binary_path, + "--isolate-dir", + self._isolate_dir, + "daemon", + "start", + ], + stdout=self._daemon_log, + ) + + # Wait for overnet_socket to be created + @retry(stop=stop_after_delay(5), wait=wait_fixed(0.1)) + def wait_for_socket(path: str) -> None: + if not Path(path).is_socket(): + raise FileNotFoundError(f"Socket not found: {path}") + + wait_for_socket(self._overnet_socket_path) + + if self._ip: + self._exec( + ["target", "add", self._ip, "--nowait"], + FFX_TARGET_ADD_TIMEOUT_SEC, + ) + + result = self._exec(["config", "get"], FFX_CONFIG_TIMEOUT_SEC) + self.log.debug(f'Config:\n{result.stdout.decode("utf-8", errors="replace")}') + + def _stop_daemon(self) -> None: + if self._daemon: + self.run( + # TODO(b/332983529): Add the following arguments once ffx daemon + # stops correctly. + # ["-t", str(FFX_DAEMON_STOP_TIMEOUT_SEC * 1000)] + ["daemon", "stop"], + skip_reachability_check=True, + ) + self._daemon.wait(timeout=FFX_DAEMON_STOP_TIMEOUT_SEC) + self._daemon = None + + if self._daemon_log: + self._daemon_log.close() + self._daemon_log = None + + if self._ssh_auth_sock_path: + Path(self._ssh_auth_sock_path).unlink(missing_ok=True) + self._ssh_auth_sock_path = None + + if self._overnet_socket_path: + Path(self._overnet_socket_path).unlink(missing_ok=True) + self._overnet_socket_path = None + + if self._sock_dir: + rmtree(self._sock_dir) + self._sock_dir = None + + self._isolate_dir = None + + def verify_reachable(self, timeout_sec: int = FFX_DEFAULT_COMMAND_TIMEOUT) -> None: + """Verify the target is reachable via RCS and various services. + + Blocks until the device allows for an RCS connection. If the device + isn't reachable within a short time, logs a warning before waiting + longer. + + Verifies the RCS connection by fetching information from the device, + which exercises several debug and informational FIDL services. + + When called for the first time, the versions will be checked for + compatibility. + + Args: + timeout_sec: Seconds to wait for reachability check + + Raises: + FFXError: when an unknown error occurs + FFXTimeout: when the target is unreachable + """ + last_err: Exception | None = None + timeout = time.perf_counter() + timeout_sec + while True: + try: + self.run( + ["target", "wait"], + timeout_sec=FFX_CONFIG_TIMEOUT_SEC, + skip_reachability_check=True, + ) + break + except FFXError as e: + if "took too long connecting to ascendd socket" in e.stderr: + last_err = e + else: + raise e + except FFXTimeout as e: + last_err = e + + if time.perf_counter() > timeout: + raise FFXTimeout( + f"Waited over {timeout_sec}s for ffx to become reachable" + ) from last_err + + # Use a shorter timeout than default because device information + # gathering can hang for a long time if the device is not actually + # connectable. + try: + result = self.run( + ["target", "show"], + timeout_sec=15, + skip_reachability_check=True, + output_format=OutputFormat.JSON_PRETTY, + ) + except Exception as e: + self.log.error( + f'Failed to reach target device. 
Try running "{self._binary_path}' + + ' doctor" to diagnose issues.' + ) + raise e + + self._has_been_reachable = True + + if not self._has_logged_version: + self._has_logged_version = True + self.compare_version(result) + + def compare_version(self, target_show_result: subprocess.CompletedProcess) -> None: + """Compares the version of Fuchsia with the version of ffx. + + Args: + target_show_result: Result of the target show command with JSON + output mode enabled + """ + result_raw = target_show_result.stdout + try: + result_json = json.loads(result_raw) + self.log.debug(f"Target show result: {result_json}") + device_version = result_json["build"]["version"] + except (AttributeError, json.JSONDecodeError) as e: + raise signals.TestAbortClass( + f'Failed to parse response of "ffx target show":\n{result_raw}' + ) from e + + ffx_version = self.run(["version"]).stdout.decode("utf-8") + + self.log.info(f"Device version: {device_version}, ffx version: {ffx_version}") + if device_version != ffx_version: + self.log.warning( + "ffx versions that differ from device versions may" + + " have compatibility issues. It is recommended to" + + " use versions within 6 weeks of each other." + )
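A minimal usage sketch for the FFX controller above; the binary path, nodename, and key path are placeholders, and error handling is elided.

import json

ffx = FFX(
    binary_path="/path/to/ffx",          # placeholder
    mdns_name="fuchsia-0000-1111-2222",  # placeholder nodename
    ssh_private_key_path="~/.ssh/fuchsia_ed25519",
)
try:
    # The first run() starts an isolated ffx daemon and verifies reachability.
    result = ffx.run(["target", "show"], output_format=OutputFormat.JSON)
    info = json.loads(result.stdout)
    print(info.get("build", {}).get("version"))
finally:
    # Stops the daemon and removes the isolate and socket directories.
    ffx.clean_up()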
diff --git a/src/antlion/controllers/fuchsia_lib/lib_controllers/__init__.py b/packages/antlion/controllers/fuchsia_lib/lib_controllers/__init__.py similarity index 100% rename from src/antlion/controllers/fuchsia_lib/lib_controllers/__init__.py rename to packages/antlion/controllers/fuchsia_lib/lib_controllers/__init__.py
diff --git a/packages/antlion/controllers/fuchsia_lib/lib_controllers/wlan_controller.py b/packages/antlion/controllers/fuchsia_lib/lib_controllers/wlan_controller.py new file mode 100644 index 0000000..7d16363 --- /dev/null +++ b/packages/antlion/controllers/fuchsia_lib/lib_controllers/wlan_controller.py
@@ -0,0 +1,137 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import time +from dataclasses import dataclass + +from honeydew.affordances.connectivity.wlan.utils.types import ( + CountryCode, + QueryIfaceResponse, + WlanMacRole, +) +from honeydew.interfaces.device_classes.fuchsia_device import ( + FuchsiaDevice as HdFuchsiaDevice, +) +from mobly import logger, signals + +from antlion import utils + +TIME_TO_SLEEP_BETWEEN_RETRIES = 1 +TIME_TO_WAIT_FOR_COUNTRY_CODE = 10 + + +@dataclass(frozen=True) +class WlanInterfaces: + """WLAN interfaces separated device type and keyed by name.""" + + client: dict[str, QueryIfaceResponse] + """Client WLAN interfaces keyed by name.""" + + ap: dict[str, QueryIfaceResponse] + """AP WLAN interfaces keyed by name.""" + + +class WlanControllerError(signals.ControllerError): + pass + + +class WlanController: + """Contains methods related to wlan core, to be used in FuchsiaDevice object""" + + def __init__(self, honeydew: HdFuchsiaDevice) -> None: + self.honeydew = honeydew + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[WlanController | {self.honeydew.device_name}]", + }, + ) + + def get_interfaces_by_role(self) -> WlanInterfaces: + """Retrieves WLAN interface information.""" + + # Retrieve WLAN interface IDs + wlan_iface_ids = self.honeydew.wlan.get_iface_id_list() + if len(wlan_iface_ids) < 1: + return WlanInterfaces(client={}, ap={}) + + # Use IDs to get WLAN interface info and mac addresses + wlan_ifaces_by_mac: dict[str, QueryIfaceResponse] = {} + for id in wlan_iface_ids: + result = self.honeydew.wlan.query_iface(id) + mac = utils.mac_address_list_to_str(bytes(result.sta_addr)) + wlan_ifaces_by_mac[mac] = result + + # Use mac addresses to query the interfaces from the netstack view, + # which allows us to supplement the interface information with the name, + # netstack_id, etc. + + # TODO(http://fxb/75909): This tedium is necessary to get the interface name + # because only netstack has that information. The bug linked here is + # to reconcile some of the information between the two perspectives, at + # which point we can eliminate step. + ifaces = self.honeydew.netstack.list_interfaces() + + client: dict[str, QueryIfaceResponse] = {} + ap: dict[str, QueryIfaceResponse] = {} + + for iface in ifaces: + if iface.mac is None: + self.log.debug(f"No MAC address for iface {iface.name}") + continue + + mac = str(iface.mac) + if mac in wlan_ifaces_by_mac: + result = wlan_ifaces_by_mac[mac] + match result.role: + case WlanMacRole.CLIENT: + client[iface.name] = result + case WlanMacRole.AP: + ap[iface.name] = result + case _: + raise ValueError(f'Unexpected WlanMacRole "{result.role}"') + + return WlanInterfaces(client, ap) + + def set_country_code(self, country_code: CountryCode) -> None: + """Sets country code through the regulatory region service and waits + for the code to be applied to WLAN PHY. 
+ + Args: + country_code: the 2 character country code to set + + Raises: + EnvironmentError - failure to get/set regulatory region + ConnectionError - failure to query PHYs + """ + self.log.info(f"Setting DUT country code to {country_code}") + self.honeydew.wlan.set_region(country_code) + + self.log.info( + f"Verifying DUT country code was correctly set to {country_code}." + ) + phy_ids_response = self.honeydew.wlan.get_phy_id_list() + + end_time = time.time() + TIME_TO_WAIT_FOR_COUNTRY_CODE + while time.time() < end_time: + for id in phy_ids_response: + resp = self.honeydew.wlan.get_country(id) + if resp == country_code: + return + time.sleep(TIME_TO_SLEEP_BETWEEN_RETRIES) + else: + raise EnvironmentError(f"Failed to set DUT country code to {country_code}.")
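A sketch of how the controller above is used; it assumes `honeydew_device` is an existing Honeydew FuchsiaDevice handle and that "US" is a valid CountryCode value.

from honeydew.affordances.connectivity.wlan.utils.types import CountryCode

controller = WlanController(honeydew_device)

# Set and verify the regulatory region before querying interfaces.
controller.set_country_code(CountryCode("US"))

interfaces = controller.get_interfaces_by_role()
for name, iface in interfaces.client.items():
    print(f"client interface {name}: role={iface.role}")
if not interfaces.ap:
    print("no AP interfaces present")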
diff --git a/packages/antlion/controllers/fuchsia_lib/lib_controllers/wlan_policy_controller.py b/packages/antlion/controllers/fuchsia_lib/lib_controllers/wlan_policy_controller.py new file mode 100644 index 0000000..2e95013 --- /dev/null +++ b/packages/antlion/controllers/fuchsia_lib/lib_controllers/wlan_policy_controller.py
@@ -0,0 +1,360 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import time +from dataclasses import dataclass + +from honeydew.affordances.connectivity.wlan.utils.errors import HoneydewWlanError +from honeydew.affordances.connectivity.wlan.utils.types import ( + ConnectionState, + DisconnectStatus, + NetworkConfig, + NetworkState, + WlanClientState, +) +from honeydew.interfaces.device_classes.fuchsia_device import ( + FuchsiaDevice as HdFuchsiaDevice, +) +from mobly import logger, signals + +from antlion.controllers.fuchsia_lib.ssh import FuchsiaSSHProvider + +SESSION_MANAGER_TIMEOUT_SEC = 10 +FUCHSIA_DEFAULT_WLAN_CONFIGURE_TIMEOUT = 30 +DEFAULT_GET_UPDATE_TIMEOUT = 60 + + +class WlanPolicyControllerError(signals.ControllerError): + pass + + +@dataclass +class PreservedState: + saved_networks: list[NetworkConfig] | None + client_connections_state: WlanClientState | None + + +@dataclass +class ClientState: + state: str + networks: list[dict[str, object]] + + +# TODO(http://b/309854439): Add a ClientStateWatcher and refactor tests to allow test +# developers more control when update listeners are set and the client update state is +# reset. +class WlanPolicyController: + """Contains methods related to the wlan policy layer, to be used in the + FuchsiaDevice object.""" + + def __init__(self, honeydew: HdFuchsiaDevice, ssh: FuchsiaSSHProvider) -> None: + self.preserved_networks_and_client_state: PreservedState | None = None + self.policy_configured = False + self.honeydew = honeydew + self.ssh = ssh + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[WlanPolicyController | {self.ssh.config.host_name}]", + }, + ) + + def configure_wlan( + self, + preserve_saved_networks: bool, + timeout_sec: int = FUCHSIA_DEFAULT_WLAN_CONFIGURE_TIMEOUT, + ) -> None: + """Sets up wlan policy layer. + + Args: + preserve_saved_networks: whether to clear existing saved + networks and client state, to be restored at test close. + timeout_sec: time to wait for device to configure WLAN. + """ + + # We need to stop session manager to free control of + # fuchsia.wlan.policy.ClientController, which can only be used by a + # single caller at a time. Fuchsia Controller needs the ClientController + # to trigger WLAN policy state changes. On eng builds the + # session_manager can be restarted after being stopped during reboot so + # we attempt killing the session manager process for 10 seconds. 
+ # See https://cs.opensource.google/fuchsia/fuchsia/+/main:sdk/fidl/fuchsia.wlan.policy/client_provider.fidl + if b"cast_agent.cm" in self.ssh.run("ps").stdout: + session_manager_expiration = time.time() + SESSION_MANAGER_TIMEOUT_SEC + while time.time() < session_manager_expiration: + self.ssh.stop_component("session_manager", is_cfv2_component=True) + + # Acquire control of policy layer + self.honeydew.wlan_policy.create_client_controller() + self.log.info("ACTS tests now have control of the WLAN policy layer.") + + if preserve_saved_networks and not self.preserved_networks_and_client_state: + self.preserved_networks_and_client_state = ( + self.remove_and_preserve_networks_and_client_state() + ) + + self.honeydew.wlan_policy.start_client_connections() + self.policy_configured = True + + def _deconfigure_wlan(self) -> None: + self.honeydew.wlan_policy.stop_client_connections() + self.policy_configured = False + + def clean_up(self) -> None: + if self.preserved_networks_and_client_state is not None: + # It is possible for policy to have been configured before, but + # deconfigured before test end. In this case, in must be setup + # before restoring networks + if not self.policy_configured: + self.configure_wlan(False) + + self.restore_preserved_networks_and_client_state() + + def _find_network( + self, ssid: str, networks: list[NetworkState] + ) -> NetworkState | None: + """Helper method to find network in list of network states. + + Args: + ssid: The network name to look for. + networks: The list of network states to look in. + + Returns: + Network state of target ssid or None if not found in networks. + """ + for network in networks: + if network.network_identifier.ssid == ssid: + return network + return None + + def wait_for_network_state( + self, + ssid: str, + expected_states: ConnectionState | set[ConnectionState], + expected_status: DisconnectStatus | None = None, + timeout_sec: int = DEFAULT_GET_UPDATE_TIMEOUT, + ) -> ConnectionState: + """Waits until the device returns with expected network state. + + Args: + ssid: The network name to check the state of. + expected_states: The network state or states we are expecting to see. + expected_status: The disconnect status of the network. Only relevant when + expected_state is FAILED or DISCONNECTED. + timeout_sec: The number of seconds to wait for a update showing connection. + + Returns: + Current network state if network converges on one of the expected states. + + Raises: + TypeError: If DisconnectStatus provided with a CONNECTING or CONNECTED + state. + WlanPolicyControllerError: If no network is found before timeout or fails to + converge to one of the expected states. + """ + + if not isinstance(expected_states, set): + expected_states = {expected_states} + + if ( + expected_states == {ConnectionState.CONNECTING, ConnectionState.CONNECTED} + or expected_states.issubset( + {ConnectionState.CONNECTING, ConnectionState.CONNECTED} + ) + and expected_status is not None + ): + raise TypeError( + "Disconnect status not valid for CONNECTING or CONNECTED states." + ) + + self.honeydew.wlan_policy.set_new_update_listener() + network: NetworkState | None = None + + end_time = time.time() + timeout_sec + while time.time() < end_time: + time_left = max(1.0, end_time - time.time()) + try: + client = self.honeydew.wlan_policy.get_update(timeout=time_left) + except TimeoutError as e: + self.log.debug("Timeout waiting for WLAN state updates: %s", e) + continue + + # If we don't find the network initially, wait and retry. 
+ network = self._find_network(ssid, client.networks) + if network is None: + self.log.debug( + f"{ssid} not found in client networks: {client.networks}" + ) + continue + + if network.connection_state in expected_states: + # Check optional disconnect status matches. + if expected_status: + if network.disconnect_status is not expected_status: + raise WlanPolicyControllerError( + f"Disconnect status is not {expected_status}" + ) + elif network.connection_state is ConnectionState.CONNECTING: + self.log.debug(f"Network {ssid} still attempting to connect.") + continue + else: + raise WlanPolicyControllerError( + f'Expected network "{ssid}" to be in state {expected_states}, ' + f"got {network.connection_state}" + ) + + # Successfully converged on expected state and status + return network.connection_state + + if network is None: + raise WlanPolicyControllerError(f"Timed out trying to find ssid: {ssid}") + raise WlanPolicyControllerError( + f'Timed out waiting for "{ssid}" to reach state {expected_states} and ' + f"status {expected_status}" + ) + + def wait_for_client_state( + self, + expected_state: WlanClientState, + timeout_sec: int = DEFAULT_GET_UPDATE_TIMEOUT, + ) -> None: + """Waits until the client converges to expected state. + + Args: + expected_state: The client state we are waiting to see. + timeout_sec: Duration to wait for the desired_state. + + Raises: + WlanPolicyControllerError: If client still has not converged to expected + state at end of timeout. + """ + self.honeydew.wlan_policy.set_new_update_listener() + + last_err: TimeoutError | None = None + end_time = time.time() + timeout_sec + while time.time() < end_time: + time_left = max(1, int(end_time - time.time())) + try: + client = self.honeydew.wlan_policy.get_update(timeout=time_left) + except TimeoutError as e: + last_err = e + continue + if client.state is not expected_state: + # Continue getting updates. + continue + else: + return + else: + self.log.error( + f"Client state did not converge to the expected state: {expected_state}" + f" Waited:{timeout_sec}s" + ) + raise WlanPolicyControllerError from last_err + + def wait_for_no_connections( + self, timeout_sec: int = DEFAULT_GET_UPDATE_TIMEOUT + ) -> None: + """Waits to see that there are no connections to the device. + + Args: + timeout_sec: The time to wait to see no connections. + + Raises: + WlanPolicyControllerError: If client update has no networks or if client + still has connections at end of timeout. + """ + self.honeydew.wlan_policy.set_new_update_listener() + + last_err: TimeoutError | None = None + end_time = time.time() + timeout_sec + while time.time() < end_time: + curr_connected_networks: list[NetworkState] = [] + time_left = max(1, int(end_time - time.time())) + try: + client = self.honeydew.wlan_policy.get_update(timeout=time_left) + except TimeoutError as e: + # Retry to handle the cases in negative testing where we expect + # to receive an 'error'. + last_err = e + continue + + # Iterate through networks checking to see if any are still connected. + for network in client.networks: + if network.connection_state in { + ConnectionState.CONNECTING, + ConnectionState.CONNECTED, + }: + curr_connected_networks.append(network) + + if len(curr_connected_networks) != 0: + # Continue getting updates. + continue + else: + return + + self.log.error(f"Networks still connected. 
Waited: {timeout_sec}s") + raise WlanPolicyControllerError from last_err + + def remove_and_preserve_networks_and_client_state(self) -> PreservedState: + """Preserves networks already saved on devices before removing them. + + This method is used to set up a clean test environment. Records the state of + client connections before tests. + + Returns: + PreservedState: State of the client containing NetworkConfigs and client + connection state. + """ + client = self.honeydew.wlan_policy.get_update() + networks = self.honeydew.wlan_policy.get_saved_networks() + self.honeydew.wlan_policy.remove_all_networks() + self.log.info("Saved networks cleared and preserved.") + return PreservedState( + saved_networks=networks, client_connections_state=client.state + ) + + def restore_preserved_networks_and_client_state(self) -> None: + """Restore preserved networks and client state onto device.""" + if self.preserved_networks_and_client_state is None: + self.log.info("No preserved networks or client state to restore") + return + + self.honeydew.wlan_policy.remove_all_networks() + + saved_networks = self.preserved_networks_and_client_state.saved_networks + if saved_networks is not None: + for network in saved_networks: + try: + self.honeydew.wlan_policy.save_network( + network.ssid, + network.security_type, + network.credential_value, + ) + except HoneydewWlanError as e: + self.log.warning( + 'Failed to restore network "%s": %s', network.ssid, e + ) + + client_state = self.preserved_networks_and_client_state.client_connections_state + if client_state is not None: + if client_state is WlanClientState.CONNECTIONS_ENABLED: + self.honeydew.wlan_policy.start_client_connections() + else: + self.honeydew.wlan_policy.stop_client_connections() + + self.log.info("Preserved networks and client state restored.") + self.preserved_networks_and_client_state = None
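Illustrative call pattern for the policy controller above; `honeydew_device` and `ssh` are assumed to be pre-existing Honeydew and FuchsiaSSHProvider handles for the same DUT, and the SSID is a placeholder.

from honeydew.affordances.connectivity.wlan.utils.types import ConnectionState

policy = WlanPolicyController(honeydew_device, ssh)

# Take control of the policy layer, preserving any saved networks.
policy.configure_wlan(preserve_saved_networks=True)

# After saving and connecting a network through honeydew.wlan_policy elsewhere,
# block until the policy layer reports the connection.
state = policy.wait_for_network_state("ExampleSSID", ConnectionState.CONNECTED)
assert state is ConnectionState.CONNECTED

# Restore the preserved networks and client state at test end.
policy.clean_up()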
diff --git a/src/antlion/controllers/fuchsia_lib/package_server.py b/packages/antlion/controllers/fuchsia_lib/package_server.py similarity index 89% rename from src/antlion/controllers/fuchsia_lib/package_server.py rename to packages/antlion/controllers/fuchsia_lib/package_server.py index d497e96..96cfbf8 100644 --- a/src/antlion/controllers/fuchsia_lib/package_server.py +++ b/packages/antlion/controllers/fuchsia_lib/package_server.py
@@ -15,25 +15,23 @@ # limitations under the License. import json +import logging import os import shutil import socket import subprocess import tarfile import tempfile - from dataclasses import dataclass from datetime import datetime -from typing import TextIO, List, Optional +from typing import TextIO -from antlion import context -from antlion import logger -from antlion import signals -from antlion import utils +from mobly import logger, signals -from antlion.controllers.fuchsia_lib.ssh import SSHError, SSHProvider +from antlion import context, utils +from antlion.controllers.fuchsia_lib.ssh import SSHProvider from antlion.net import wait_for_port -from antlion.tracelogger import TraceLogger +from antlion.runner import CalledProcessError DEFAULT_FUCHSIA_REPO_NAME = "fuchsia.com" PM_SERVE_STOP_TIMEOUT_SEC = 5 @@ -46,17 +44,17 @@ def random_port() -> int: s = socket.socket() s.bind(("", 0)) - return s.getsockname()[1] + return int(s.getsockname()[1]) @dataclass class Route: """Represent a route in the routing table.""" - preferred_source: Optional[str] + preferred_source: str | None -def find_routes_to(dest_ip) -> List[Route]: +def find_routes_to(dest_ip) -> list[Route]: """Find the routes used to reach a destination. Look through the routing table for the routes that would be used without @@ -124,11 +122,16 @@ packages_archive_path: Path to an archive containing the pm binary and amber-files. """ - self.log: TraceLogger = logger.create_tagged_trace_logger("pm") + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: "[pm]", + }, + ) - self._server_log: Optional[TextIO] = None - self._server_proc: Optional[subprocess.Popen] = None - self._log_path: Optional[str] = None + self._server_log: TextIO | None = None + self._server_proc: subprocess.Popen | None = None + self._log_path: str | None = None self._tmp_dir = tempfile.mkdtemp(prefix="packages-") tar = tarfile.open(packages_archive_path, "r:gz") @@ -213,9 +216,9 @@ """ # Remove any existing repositories that may be stale. try: - ssh.run(f"pkgctl repo rm fuchsia-pkg://{repo_name}") - except SSHError as e: - if "NOT_FOUND" not in e.result.stderr: + ssh.run(["pkgctl", "repo", "rm", f"fuchsia-pkg://{repo_name}"]) + except CalledProcessError as e: + if b"NOT_FOUND" not in e.stderr: raise e # Configure the device with the new repository.
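The package server lifecycle driven by FuchsiaDevice.start_package_server reduces to the calls sketched below; the archive path is a placeholder and `ssh` is assumed to be an SSHProvider for the DUT.

from antlion.controllers.fuchsia_lib.package_server import PackageServer

server = PackageServer("/path/to/packages.tar.gz")  # placeholder archive path
server.start()                 # serve the extracted pm binary and amber-files
server.configure_device(ssh)   # register the repository with pkgctl on the DUT
# ... run tests that resolve packages from the host ...
server.clean_up()              # release the server and its temporary files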
diff --git a/packages/antlion/controllers/fuchsia_lib/sl4f.py b/packages/antlion/controllers/fuchsia_lib/sl4f.py new file mode 100644 index 0000000..c3ecf24 --- /dev/null +++ b/packages/antlion/controllers/fuchsia_lib/sl4f.py
@@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ipaddress +import logging + +from mobly import logger + +from antlion.controllers.fuchsia_lib.ssh import FuchsiaSSHProvider +from antlion.controllers.fuchsia_lib.wlan_deprecated_configuration_lib import ( + FuchsiaWlanDeprecatedConfigurationLib, +) +from antlion.net import wait_for_port +from antlion.runner import CalledProcessError + +DEFAULT_SL4F_PORT = 80 +START_SL4F_V2_CMD = "start_sl4f" + + +class SL4F: + """Module for Fuchsia devices to interact with the SL4F tool. + + Attributes: + ssh: Transport to start and stop SL4F. + address: http address for SL4F server including SL4F port. + log: Logger for the device-specific instance of SL4F. + """ + + def __init__( + self, + ssh: FuchsiaSSHProvider, + port: int = DEFAULT_SL4F_PORT, + ) -> None: + """ + Args: + ssh: Transport to start and stop SL4F. + port: Port for the SL4F server to listen on. + """ + ip = ipaddress.ip_address(ssh.config.host_name) + if ip.version == 4: + self.address = f"http://{ip}:{port}" + elif ip.version == 6: + self.address = f"http://[{ip}]:{port}" + + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[SL4F | {self.address}]", + }, + ) + + try: + ssh.stop_component("sl4f") + ssh.run(START_SL4F_V2_CMD).stdout + except CalledProcessError: + # TODO(fxbug.dev/42181764) Remove support to run SL4F in CFv1 mode + # once ACTS no longer use images that comes with only CFv1 SL4F. + self.log.warn( + "Running SL4F in CFv1 mode, " + "this is deprecated for images built after 5/9/2022, " + "see https://fxbug.dev/42157029 for more info." + ) + ssh.stop_component("sl4f") + ssh.start_v1_component("sl4f") + + try: + wait_for_port(ssh.config.host_name, port) + self.log.info("SL4F server is reachable") + except TimeoutError as e: + raise TimeoutError("SL4F server is unreachable") from e + + def _init_libraries(self) -> None: + # Grabs command from FuchsiaWlanDeprecatedConfigurationLib + self.wlan_deprecated_configuration_lib = FuchsiaWlanDeprecatedConfigurationLib( + self.address + )
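Constructing the SL4F wrapper above is a one-liner once an SSH transport exists; a sketch follows, with the addresses in the comment purely illustrative.

# `ssh` is assumed to be a FuchsiaSSHProvider already connected to the DUT.
sl4f = SL4F(ssh, port=DEFAULT_SL4F_PORT)

# The address is formatted per IP version, e.g. "http://192.168.42.17:80"
# for IPv4 or "http://[2001:db8::1]:80" for IPv6.
print(sl4f.address)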
diff --git a/src/antlion/controllers/fuchsia_lib/ssh.py b/packages/antlion/controllers/fuchsia_lib/ssh.py similarity index 86% rename from src/antlion/controllers/fuchsia_lib/ssh.py rename to packages/antlion/controllers/fuchsia_lib/ssh.py index 1d1f421..94e2001 100644 --- a/src/antlion/controllers/fuchsia_lib/ssh.py +++ b/packages/antlion/controllers/fuchsia_lib/ssh.py
@@ -16,7 +16,8 @@ import time -from antlion.capabilities.ssh import SSHError, SSHProvider +from antlion.capabilities.ssh import SSHProvider +from antlion.runner import CalledProcessError DEFAULT_SSH_USER: str = "fuchsia" DEFAULT_SSH_PRIVATE_KEY: str = "~/.ssh/fuchsia_ed25519" @@ -45,15 +46,16 @@ """ # The "run -d" command will hang when executed without a pseudo-tty # allocated. + self.config.force_tty = True self.run( f"run -d fuchsia-pkg://{repo}/{component}#meta/{component}.cmx", - force_tty=True, ) + self.config.force_tty = False timeout = time.perf_counter() + timeout_sec while True: ps_cmd = self.run("ps") - if f"{component}.cmx" in ps_cmd.stdout: + if f"{component}.cmx" in ps_cmd.stdout.decode("utf-8"): return if time.perf_counter() > timeout: raise TimeoutError( @@ -70,10 +72,10 @@ suffix = "cm" if is_cfv2_component else "cmx" try: - self.run(f"killall {component}.{suffix}") + self.run(["killall", f"{component}.{suffix}"]) self.log.info(f"Stopped component: {component}.{suffix}") - except SSHError as e: - if "no tasks found" in e.result.stderr: + except CalledProcessError as e: + if b"no tasks found" in e.stderr: self.log.debug(f"Could not find component: {component}.{suffix}") return raise e
diff --git a/src/antlion/controllers/fuchsia_lib/wlan_deprecated_configuration_lib.py b/packages/antlion/controllers/fuchsia_lib/wlan_deprecated_configuration_lib.py similarity index 97% rename from src/antlion/controllers/fuchsia_lib/wlan_deprecated_configuration_lib.py rename to packages/antlion/controllers/fuchsia_lib/wlan_deprecated_configuration_lib.py index a53698b..df3f66e 100644 --- a/src/antlion/controllers/fuchsia_lib/wlan_deprecated_configuration_lib.py +++ b/packages/antlion/controllers/fuchsia_lib/wlan_deprecated_configuration_lib.py
@@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import logger from antlion.controllers.fuchsia_lib.base_lib import BaseLib
diff --git a/src/antlion/controllers/iperf_client.py b/packages/antlion/controllers/iperf_client.py similarity index 67% rename from src/antlion/controllers/iperf_client.py rename to packages/antlion/controllers/iperf_client.py index 9ad6efc..a24330d 100644 --- a/src/antlion/controllers/iperf_client.py +++ b/packages/antlion/controllers/iperf_client.py
@@ -14,31 +14,32 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + import logging import os -import subprocess import socket +import subprocess import threading +from abc import ABC, abstractmethod from antlion import context -from antlion import utils +from antlion.capabilities.ssh import SSHConfig from antlion.controllers.adb_lib.error import AdbCommandError from antlion.controllers.android_device import AndroidDevice from antlion.controllers.fuchsia_lib.ssh import SSHProvider -from antlion.controllers.iperf_server import _AndroidDeviceBridge -from antlion.controllers.utils_lib.ssh import connection -from antlion.controllers.utils_lib.ssh import settings -from antlion.libs.proc import job +from antlion.controllers.utils_lib.commands.date import LinuxDateCommand +from antlion.types import ControllerConfig, Json +from antlion.validation import MapValidator -MOBLY_CONTROLLER_CONFIG_NAME = "IPerfClient" -ACTS_CONTROLLER_REFERENCE_NAME = "iperf_clients" +MOBLY_CONTROLLER_CONFIG_NAME: str = "IPerfClient" class IPerfError(Exception): """Raised on execution errors of iPerf.""" -def create(configs): +def create(configs: list[ControllerConfig]) -> list[IPerfClientBase]: """Factory method for iperf clients. The function creates iperf clients based on at least one config. @@ -49,18 +50,15 @@ Args: configs: config parameters for the iperf server """ - results = [] - for c in configs: - if type(c) is dict and "AndroidDevice" in c: - results.append( - IPerfClientOverAdb( - c["AndroidDevice"], test_interface=c.get("test_interface") - ) - ) - elif type(c) is dict and "ssh_config" in c: + results: list[IPerfClientBase] = [] + for config in configs: + c = MapValidator(config) + if "ssh_config" in config: results.append( IPerfClientOverSsh( - c["ssh_config"], test_interface=c.get("test_interface") + SSHProvider(SSHConfig.from_config(c.get(dict, "ssh_config"))), + test_interface=c.get(str, "test_interface"), + sync_date=True, ) ) else: @@ -68,21 +66,20 @@ return results -def get_info(iperf_clients): - """Placeholder for info about iperf clients - - Returns: - None - """ - return None - - -def destroy(_): +def destroy(objects: list[IPerfClientBase]) -> None: # No cleanup needed. pass -class IPerfClientBase(object): +def get_info(objects: list[IPerfClientBase]) -> list[Json]: + return [] + + +class RouteNotFound(ConnectionError): + """Failed to find a route to the iperf server.""" + + +class IPerfClientBase(ABC): """The Base class for all IPerfClients. This base class is responsible for synchronizing the logging to prevent @@ -96,8 +93,19 @@ __log_file_lock = threading.Lock() + @property + @abstractmethod + def test_interface(self) -> str | None: + """Find the test interface. + + Returns: + Name of the interface used to communicate with server_ap, or None if + not set. + """ + ... + @staticmethod - def _get_full_file_path(tag=""): + def _get_full_file_path(tag: str = "") -> str: """Returns the full file path for the IPerfClient log file. Note: If the directory for the file path does not exist, it will be @@ -121,7 +129,14 @@ return os.path.join(full_out_dir, out_file_name) - def start(self, ip, iperf_args, tag, timeout=3600, iperf_binary=None): + def start( + self, + ip: str, + iperf_args: str, + tag: str, + timeout: int = 3600, + iperf_binary: str | None = None, + ) -> str: """Starts iperf client, and waits for completion. 
Args: @@ -142,7 +157,18 @@ class IPerfClient(IPerfClientBase): """Class that handles iperf3 client operations.""" - def start(self, ip, iperf_args, tag, timeout=3600, iperf_binary=None): + @property + def test_interface(self) -> str | None: + return None + + def start( + self, + ip: str, + iperf_args: str, + tag: str, + timeout: int = 3600, + iperf_binary: str | None = None, + ) -> str: """Starts iperf client, and waits for completion. Args: @@ -163,7 +189,7 @@ ) iperf_binary = "iperf3" else: - logging.debug("Using iperf3 binary located at %s" % iperf_binary) + logging.debug(f"Using iperf3 binary located at {iperf_binary}") iperf_cmd = [str(iperf_binary), "-c", ip] + iperf_args.split(" ") full_out_path = self._get_full_file_path(tag) @@ -178,28 +204,30 @@ def __init__( self, - ssh_config: str, - test_interface: str = None, - ssh_provider: SSHProvider = None, + ssh_provider: SSHProvider, + test_interface: str | None = None, + sync_date: bool = True, ): self._ssh_provider = ssh_provider - if not self._ssh_provider: - self._ssh_settings = settings.from_config(ssh_config) - if not ( - utils.is_valid_ipv4_address(self._ssh_settings.hostname) - or utils.is_valid_ipv6_address(self._ssh_settings.hostname) - ): - mdns_ip = utils.get_fuchsia_mdns_ipv6_address( - self._ssh_settings.hostname - ) - if mdns_ip: - self._ssh_settings.hostname = mdns_ip - self._ssh_session = None - self.start_ssh() + self._test_interface = test_interface - self.test_interface = test_interface + if sync_date: + # iperf clients are not given internet access, so their system time + # needs to be manually set to be accurate. + LinuxDateCommand(self._ssh_provider).sync() - def start(self, ip, iperf_args, tag, timeout=3600, iperf_binary=None): + @property + def test_interface(self) -> str | None: + return self._test_interface + + def start( + self, + ip: str, + iperf_args: str, + tag: str, + timeout: int = 3600, + iperf_binary: str | None = None, + ) -> str: """Starts iperf client, and waits for completion. Args: @@ -220,49 +248,31 @@ ) iperf_binary = "iperf3" else: - logging.debug("Using iperf3 binary located at %s" % iperf_binary) - iperf_cmd = "{} -c {} {}".format(iperf_binary, ip, iperf_args) + logging.debug(f"Using iperf3 binary located at {iperf_binary}") + iperf_cmd = f"{iperf_binary} -c {ip} {iperf_args}" full_out_path = self._get_full_file_path(tag) try: - self.start_ssh() - if self._ssh_provider: - iperf_process = self._ssh_provider.run(iperf_cmd, timeout_sec=timeout) - else: - iperf_process = self._ssh_session.run(iperf_cmd, timeout=timeout) + iperf_process = self._ssh_provider.run(iperf_cmd, timeout_sec=timeout) iperf_output = iperf_process.stdout - with open(full_out_path, "w") as out_file: + with open(full_out_path, "wb") as out_file: out_file.write(iperf_output) except socket.timeout: raise TimeoutError( "Socket timeout. Timed out waiting for iperf " "client to finish." ) except Exception as err: - logging.exception("iperf run failed: {}".format(err)) + logging.exception(f"iperf run failed: {err}") return full_out_path - def start_ssh(self): - """Starts an ssh session to the iperf client.""" - if self._ssh_provider: - # SSH sessions are created by the provider. - return - if not self._ssh_session: - self._ssh_session = connection.SshConnection(self._ssh_settings) - - def close_ssh(self): - """Closes the ssh session to the iperf client, if one exists, preventing - connection reset errors when rebooting client device. 
- """ - if self._ssh_session: - self._ssh_session.close() - self._ssh_session = None - class IPerfClientOverAdb(IPerfClientBase): """Class that handles iperf3 operations over ADB devices.""" - def __init__(self, android_device_or_serial, test_interface=None): + def __init__( + self, android_device: AndroidDevice, test_interface: str | None = None + ): """Creates a new IPerfClientOverAdb object. Args: @@ -273,19 +283,21 @@ test_interface: The network interface that will be used to send traffic to the iperf server. """ - self._android_device_or_serial = android_device_or_serial - self.test_interface = test_interface + self._android_device = android_device + self._test_interface = test_interface @property - def _android_device(self): - if isinstance(self._android_device_or_serial, AndroidDevice): - return self._android_device_or_serial - else: - return _AndroidDeviceBridge.android_devices()[ - self._android_device_or_serial - ] + def test_interface(self) -> str | None: + return self._test_interface - def start(self, ip, iperf_args, tag, timeout=3600, iperf_binary=None): + def start( + self, + ip: str, + iperf_args: str, + tag: str, + timeout: int = 3600, + iperf_binary: str | None = None, + ) -> str: """Starts iperf client, and waits for completion. Args: @@ -308,13 +320,13 @@ ) iperf_binary = "iperf3" else: - logging.debug("Using iperf3 binary located at %s" % iperf_binary) - iperf_cmd = "{} -c {} {}".format(iperf_binary, ip, iperf_args) + logging.debug(f"Using iperf3 binary located at {iperf_binary}") + iperf_cmd = f"{iperf_binary} -c {ip} {iperf_args}" out = self._android_device.adb.shell(str(iperf_cmd), timeout=timeout) clean_out = out.split("\n") if "error" in clean_out[0].lower(): raise IPerfError(clean_out) - except (job.TimeoutError, AdbCommandError): + except (subprocess.TimeoutExpired, AdbCommandError): logging.warning("TimeoutError: Iperf measurement failed.") full_out_path = self._get_full_file_path(tag)
diff --git a/packages/antlion/controllers/iperf_server.py b/packages/antlion/controllers/iperf_server.py new file mode 100755 index 0000000..bee4679 --- /dev/null +++ b/packages/antlion/controllers/iperf_server.py
@@ -0,0 +1,620 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import json +import logging +import math +import os +import shlex +import subprocess +import threading +import time +from typing import IO + +from mobly import logger, signals + +from antlion import context, utils +from antlion.controllers.utils_lib.commands import nmcli +from antlion.controllers.utils_lib.commands.command import optional, require +from antlion.controllers.utils_lib.commands.journalctl import LinuxJournalctlCommand +from antlion.controllers.utils_lib.ssh import connection, settings +from antlion.libs.proc import job +from antlion.types import ControllerConfig, Json +from antlion.validation import MapValidator + +MOBLY_CONTROLLER_CONFIG_NAME: str = "IPerfServer" +KILOBITS = 1024 +MEGABITS = KILOBITS * 1024 +GIGABITS = MEGABITS * 1024 +BITS_IN_BYTE = 8 + + +def create( + configs: list[ControllerConfig], +) -> list[IPerfServer | IPerfServerOverSsh]: + """Factory method for iperf servers. + + The function creates iperf servers based on at least one config. + If configs only specify a port number, a regular local IPerfServer object + will be created. If configs contains ssh settings or and AndroidDevice, + remote iperf servers will be started on those devices + + Args: + configs: config parameters for the iperf server + """ + results: list[IPerfServer | IPerfServerOverSsh] = [] + for c in configs: + if isinstance(c, (str, int)) and str(c).isdigit(): + results.append(IPerfServer(int(c))) + elif isinstance(c, dict) and "ssh_config" in c and "port" in c: + config = MapValidator(c) + results.append( + IPerfServerOverSsh( + settings.from_config(config.get(dict, "ssh_config")), + config.get(int, "port"), + test_interface=config.get(str, "test_interface"), + use_killall=config.get(bool, "use_killall", False), + ) + ) + else: + raise ValueError( + f"Config entry {c} in {configs} is not a valid IPerfServer config." + ) + return results + + +def destroy( + objects: list[IPerfServer | IPerfServerOverSsh], +) -> None: + for iperf_server in objects: + try: + iperf_server.stop() + except Exception: + logging.exception(f"Unable to properly clean up {iperf_server}.") + + +def get_info( + objects: list[IPerfServer | IPerfServerOverSsh], +) -> list[Json]: + return [] + + +class IPerfResult(object): + def __init__(self, result_path, reporting_speed_units="Mbytes"): + """Loads iperf result from file. + + Loads iperf result from JSON formatted server log. File can be accessed + before or after server is stopped. Note that only the first JSON object + will be loaded and this funtion is not intended to be used with files + containing multiple iperf client runs. 
+ """ + # if result_path isn't a path, treat it as JSON + self.reporting_speed_units = reporting_speed_units + if not os.path.exists(result_path): + self.result = json.loads(result_path) + else: + try: + with open(result_path, "r") as f: + iperf_output = f.readlines() + if "}\n" in iperf_output: + iperf_output = iperf_output[: iperf_output.index("}\n") + 1] + iperf_string = "".join(iperf_output) + iperf_string = iperf_string.replace("nan", "0") + self.result = json.loads(iperf_string) + except ValueError: + with open(result_path, "r") as f: + # Possibly a result from interrupted iperf run, + # skip first line and try again. + lines = f.readlines()[1:] + self.result = json.loads("".join(lines)) + + def _has_data(self): + """Checks if the iperf result has valid throughput data. + + Returns: + True if the result contains throughput data. False otherwise. + """ + return ("end" in self.result) and ( + "sum_received" in self.result["end"] or "sum" in self.result["end"] + ) + + def _get_reporting_speed( + self, network_speed_in_bits_per_second: int | float + ) -> float: + """Sets the units for the network speed reporting based on how the + object was initiated. Defaults to Megabytes per second. Currently + supported, bits per second (bits), kilobits per second (kbits), megabits + per second (mbits), gigabits per second (gbits), bytes per second + (bytes), kilobits per second (kbytes), megabits per second (mbytes), + gigabytes per second (gbytes). + + Args: + network_speed_in_bits_per_second: The network speed from iperf in + bits per second. + + Returns: + The value of the throughput in the appropriate units. + """ + speed_divisor = 1 + if self.reporting_speed_units[1:].lower() == "bytes": + speed_divisor = speed_divisor * BITS_IN_BYTE + if self.reporting_speed_units[0:1].lower() == "k": + speed_divisor = speed_divisor * KILOBITS + if self.reporting_speed_units[0:1].lower() == "m": + speed_divisor = speed_divisor * MEGABITS + if self.reporting_speed_units[0:1].lower() == "g": + speed_divisor = speed_divisor * GIGABITS + return network_speed_in_bits_per_second / speed_divisor + + def get_json(self): + """Returns the raw json output from iPerf.""" + return self.result + + @property + def error(self): + return self.result.get("error", None) + + @property + def avg_rate(self): + """Average UDP rate in MB/s over the entire run. + + This is the average UDP rate observed at the terminal the iperf result + is pulled from. According to iperf3 documentation this is calculated + based on bytes sent and thus is not a good representation of the + quality of the link. If the result is not from a success run, this + property is None. + """ + if not self._has_data() or "sum" not in self.result["end"]: + return None + bps = self.result["end"]["sum"]["bits_per_second"] + return self._get_reporting_speed(bps) + + @property + def avg_receive_rate(self): + """Average receiving rate in MB/s over the entire run. + + This data may not exist if iperf was interrupted. If the result is not + from a success run, this property is None. + """ + if not self._has_data() or "sum_received" not in self.result["end"]: + return None + bps = self.result["end"]["sum_received"]["bits_per_second"] + return self._get_reporting_speed(bps) + + @property + def avg_send_rate(self): + """Average sending rate in MB/s over the entire run. + + This data may not exist if iperf was interrupted. If the result is not + from a success run, this property is None. 
+ """ + if not self._has_data() or "sum_sent" not in self.result["end"]: + return None + bps = self.result["end"]["sum_sent"]["bits_per_second"] + return self._get_reporting_speed(bps) + + @property + def instantaneous_rates(self): + """Instantaneous received rate in MB/s over entire run. + + This data may not exist if iperf was interrupted. If the result is not + from a success run, this property is None. + """ + if not self._has_data(): + return None + intervals = [ + self._get_reporting_speed(interval["sum"]["bits_per_second"]) + for interval in self.result["intervals"] + ] + return intervals + + @property + def std_deviation(self): + """Standard deviation of rates in MB/s over entire run. + + This data may not exist if iperf was interrupted. If the result is not + from a success run, this property is None. + """ + return self.get_std_deviation(0) + + def get_std_deviation(self, iperf_ignored_interval): + """Standard deviation of rates in MB/s over entire run. + + This data may not exist if iperf was interrupted. If the result is not + from a success run, this property is None. A configurable number of + beginning (and the single last) intervals are ignored in the + calculation as they are inaccurate (e.g. the last is from a very small + interval) + + Args: + iperf_ignored_interval: number of iperf interval to ignored in + calculating standard deviation + + Returns: + The standard deviation. + """ + if not self._has_data(): + return None + instantaneous_rates = self.instantaneous_rates[iperf_ignored_interval:-1] + avg_rate = math.fsum(instantaneous_rates) / len(instantaneous_rates) + sqd_deviations = [(rate - avg_rate) ** 2 for rate in instantaneous_rates] + std_dev = math.sqrt(math.fsum(sqd_deviations) / (len(sqd_deviations) - 1)) + return std_dev + + +class IPerfServerBase(object): + # Keeps track of the number of IPerfServer logs to prevent file name + # collisions. + __log_file_counter = 0 + + __log_file_lock = threading.Lock() + + def __init__(self, port: int): + self._port = port + # TODO(markdr): We shouldn't be storing the log files in an array like + # this. Nobody should be reading this property either. Instead, the + # IPerfResult should be returned in stop() with all the necessary info. + # See aosp/1012824 for a WIP implementation. + self.log_files: list[str] = [] + + @property + def port(self) -> int: + raise NotImplementedError("port must be specified.") + + @property + def started(self) -> bool: + raise NotImplementedError("started must be specified.") + + def start(self, extra_args: str = "", tag: str = "") -> None: + """Starts an iperf3 server. + + Args: + extra_args: Extra arguments to start iperf server with. + tag: Appended to log file name to identify logs from different + iperf runs. + """ + raise NotImplementedError("start() must be specified.") + + def stop(self) -> str | None: + """Stops the iperf server. + + Returns: + The name of the log file generated from the terminated session, or + None if iperf wasn't started or ran successfully. + """ + raise NotImplementedError("stop() must be specified.") + + def _get_full_file_path(self, tag: str | None = None) -> str: + """Returns the full file path for the IPerfServer log file. + + Note: If the directory for the file path does not exist, it will be + created. + + Args: + tag: The tag passed in to the server run. 
+ """ + out_dir = self.log_path + + with IPerfServerBase.__log_file_lock: + tags = [tag, IPerfServerBase.__log_file_counter] + out_file_name = "IPerfServer,%s.log" % ( + ",".join([str(x) for x in tags if x != "" and x is not None]) + ) + IPerfServerBase.__log_file_counter += 1 + + file_path = os.path.join(out_dir, out_file_name) + self.log_files.append(file_path) + return file_path + + @property + def log_path(self) -> str: + current_context = context.get_current_context() + full_out_dir = os.path.join( + current_context.get_full_output_path(), f"IPerfServer{self.port}" + ) + + # Ensure the directory exists. + os.makedirs(full_out_dir, exist_ok=True) + + return full_out_dir + + +def _get_port_from_ss_output(ss_output, pid): + pid = str(pid) + lines = ss_output.split("\n") + for line in lines: + if pid in line: + # Expected format: + # tcp LISTEN 0 5 *:<PORT> *:* users:(("cmd",pid=<PID>,fd=3)) + return line.split()[4].split(":")[-1] + else: + raise ProcessLookupError("Could not find started iperf3 process.") + + +class IPerfServer(IPerfServerBase): + """Class that handles iperf server commands on localhost.""" + + def __init__(self, port: int = 5201) -> None: + super().__init__(port) + self._hinted_port = port + self._current_log_file: str | None = None + self._iperf_process: subprocess.Popen[bytes] | None = None + self._last_opened_file: IO[bytes] | None = None + + @property + def port(self) -> int: + return self._port + + @property + def started(self) -> bool: + return self._iperf_process is not None + + def start(self, extra_args: str = "", tag: str = "") -> None: + """Starts iperf server on local machine. + + Args: + extra_args: A string representing extra arguments to start iperf + server with. + tag: Appended to log file name to identify logs from different + iperf runs. + """ + if self._iperf_process is not None: + return + + self._current_log_file = self._get_full_file_path(tag) + + # Run an iperf3 server on the hinted port with JSON output. + command = ["iperf3", "-s", "-p", str(self._hinted_port), "-J"] + + command.extend(shlex.split(extra_args)) + + if self._last_opened_file: + self._last_opened_file.close() + self._last_opened_file = open(self._current_log_file, "wb") + self._iperf_process = subprocess.Popen( + command, stdout=self._last_opened_file, stderr=subprocess.DEVNULL + ) + for attempts_left in reversed(range(3)): + try: + self._port = int( + _get_port_from_ss_output( + job.run("ss -l -p -n | grep iperf").stdout, + self._iperf_process.pid, + ) + ) + break + except ProcessLookupError: + if attempts_left == 0: + raise + logging.debug("iperf3 process not started yet.") + time.sleep(0.01) + + def stop(self) -> str | None: + """Stops the iperf server. + + Returns: + The name of the log file generated from the terminated session, or + None if iperf wasn't started or ran successfully. 
+ """ + if self._iperf_process is None: + return None + + if self._last_opened_file: + self._last_opened_file.close() + self._last_opened_file = None + + self._iperf_process.terminate() + self._iperf_process = None + + return self._current_log_file + + def __del__(self) -> None: + self.stop() + + +class IPerfServerOverSsh(IPerfServerBase): + """Class that handles iperf3 operations on remote machines.""" + + def __init__( + self, + ssh_settings: settings.SshSettings, + port: int, + test_interface: str, + use_killall: bool = False, + ): + super().__init__(port) + self.test_interface = test_interface + self.hostname = ssh_settings.hostname + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[IPerfServer | {self.hostname}]", + }, + ) + self._ssh_settings = ssh_settings + self._ssh_session: connection.SshConnection | None = connection.SshConnection( + ssh_settings + ) + self._journalctl = require(LinuxJournalctlCommand(self._ssh_session)) + + self._iperf_pid: str | None = None + self._current_tag: str | None = None + self._use_killall = str(use_killall).lower() == "true" + + # The control and test interfaces have to be different, otherwise + # performing a DHCP release+renewal risks severing the SSH connection + # and bricking the device. + control_interface = utils.get_interface_based_on_ip( + self._ssh_session, self.hostname + ) + if control_interface == test_interface: + raise signals.TestAbortAll( + f"iperf server control interface ({control_interface}) cannot be the " + f"same as the test interface ({test_interface})." + ) + + # Disable NetworkManager on the test interface + self._nmcli = optional(nmcli.LinuxNmcliCommand(self._ssh_session)) + if self._nmcli: + self._nmcli.setup_device(self.test_interface) + + @property + def port(self) -> int: + return self._port + + @property + def started(self) -> bool: + return self._iperf_pid is not None + + def _get_remote_log_path(self) -> str: + return f"/tmp/iperf_server_port{self.port}.log" + + def get_interface_ip_addresses(self, interface: str) -> dict[str, list[str]]: + """Gets all of the ip addresses, ipv4 and ipv6, associated with a + particular interface name. + + Args: + interface: The interface name on the device, ie eth0 + + Returns: + A list of dictionaries of the various IP addresses. See + utils.get_interface_ip_addresses. + """ + return utils.get_interface_ip_addresses(self._get_ssh(), interface) + + def renew_test_interface_ip_address(self) -> None: + """Renews the test interface's IPv4 address. + + Necessary for changing DHCP scopes during a test. + """ + utils.renew_linux_ip_address(self._get_ssh(), self.test_interface) + + def get_addr( + self, addr_type: str = "ipv4_private", timeout_sec: int | None = None + ) -> str: + """Wait until a type of IP address on the test interface is available + then return it. 
+ """ + return utils.get_addr( + self._get_ssh(), self.test_interface, addr_type, timeout_sec + ) + + def _cleanup_iperf_port(self) -> None: + """Checks and kills zombie iperf servers occupying intended port.""" + assert self._ssh_session is not None + + netstat = self._ssh_session.run(["netstat", "-tupln"]).stdout.decode("utf-8") + for line in netstat.splitlines(): + if "LISTEN" in line and "iperf3" in line and f":{self.port}" in line: + pid = int(line.split()[-1].split("/")[0]) + logging.debug("Killing zombie server on port %i: %i", self.port, pid) + self._ssh_session.run(["kill", "-9", str(pid)]) + + def start( + self, + extra_args: str = "", + tag: str = "", + iperf_binary: str | None = None, + ) -> None: + """Starts iperf server on specified machine and port. + + Args: + extra_args: Extra arguments to start iperf server with. + tag: Appended to log file name to identify logs from different + iperf runs. + iperf_binary: Location of iperf3 binary. If none, it is assumed the + the binary is in the path. + """ + if self.started: + return + + self._cleanup_iperf_port() + if not iperf_binary: + logging.debug( + "No iperf3 binary specified. " "Assuming iperf3 is in the path." + ) + iperf_binary = "iperf3" + else: + logging.debug(f"Using iperf3 binary located at {iperf_binary}") + iperf_command = f"{iperf_binary} -s -J -p {self.port}" + + cmd = f"{iperf_command} {extra_args} > {self._get_remote_log_path()}" + + job_result = self._get_ssh().run_async(cmd) + self._iperf_pid = job_result.stdout.decode("utf-8") + self._current_tag = tag + + def stop(self) -> str | None: + """Stops the iperf server. + + Returns: + The name of the log file generated from the terminated session, or + None if iperf wasn't started or ran successfully. + """ + if not self.started: + return None + + ssh = self._get_ssh() + + if self._use_killall: + ssh.run(["killall", "iperf3"], ignore_status=True) + elif self._iperf_pid: + ssh.run(["kill", "-9", self._iperf_pid]) + + iperf_result = ssh.run(f"cat {self._get_remote_log_path()}") + + log_file = self._get_full_file_path(self._current_tag) + with open(log_file, "wb") as f: + f.write(iperf_result.stdout) + + ssh.run(["rm", self._get_remote_log_path()]) + self._iperf_pid = None + return log_file + + def _get_ssh(self) -> connection.SshConnection: + if self._ssh_session is None: + self._ssh_session = connection.SshConnection(self._ssh_settings) + + # Disable NetworkManager on the test interface + self._nmcli = optional(nmcli.LinuxNmcliCommand(self._ssh_session)) + if self._nmcli: + self._nmcli.setup_device(self.test_interface) + + return self._ssh_session + + def close_ssh(self) -> None: + """Closes the ssh session to the iperf server, if one exists, preventing + connection reset errors when rebooting server device. + """ + if self.started: + self.stop() + if self._ssh_session: + self._ssh_session.close() + self._ssh_session = None + + def get_systemd_journal(self) -> str: + had_ssh = False if self._ssh_session is None else True + + self._journalctl.set_runner(self._get_ssh()) + logs = self._journalctl.logs() + + if not had_ssh: + # Return to closed state + self.close_ssh() + + return logs
diff --git a/packages/antlion/controllers/openwrt_ap.py b/packages/antlion/controllers/openwrt_ap.py new file mode 100644 index 0000000..bb5c3a7 --- /dev/null +++ b/packages/antlion/controllers/openwrt_ap.py
@@ -0,0 +1,517 @@ +#!/usr/bin/env python3 +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Controller for Open WRT access point.""" + +from __future__ import annotations + +import logging +import random +import re +import time +from typing import Literal + +import yaml +from mobly import logger, signals + +from antlion.controllers.openwrt_lib import ( + network_settings, + wireless_config, + wireless_settings_applier, +) +from antlion.controllers.openwrt_lib.openwrt_constants import SYSTEM_INFO_CMD +from antlion.controllers.openwrt_lib.openwrt_constants import ( + OpenWrtModelMap as modelmap, +) +from antlion.controllers.openwrt_lib.openwrt_constants import OpenWrtWifiSetting +from antlion.controllers.utils_lib.ssh import connection, settings +from antlion.types import ControllerConfig, Json + +MOBLY_CONTROLLER_CONFIG_NAME: str = "OpenWrtAP" +ACTS_CONTROLLER_REFERENCE_NAME = "access_points" +OWE_SECURITY = "owe" +SAE_SECURITY = "sae" +SAEMIXED_SECURITY = "sae-mixed" +ENABLE_RADIO = "0" +PMF_ENABLED = 2 +WAIT_TIME = 20 +DEFAULT_RADIOS = ("radio0", "radio1") + + +def create(configs: list[ControllerConfig]) -> list[OpenWrtAP]: + """Creates ap controllers from a json config. + + Creates an ap controller from either a list, or a single element. The element + can either be just the hostname or a dictionary containing the hostname and + username of the AP to connect to over SSH. + + Args: + configs: The json configs that represent this controller. + + Returns: + OpenWrtAP objects + + Example: + Below is the config file entry for OpenWrtAP as a list. A testbed can have + 1 or more APs to configure. Each AP has a "ssh_config" key to provide SSH + login information. OpenWrtAP#__init__() uses this to create SSH object. + + "OpenWrtAP": [ + { + "ssh_config": { + "user" : "root", + "host" : "192.168.1.1" + } + }, + { + "ssh_config": { + "user" : "root", + "host" : "192.168.1.2" + } + } + ] + """ + return [OpenWrtAP(c) for c in configs] + + +def destroy(objects: list[OpenWrtAP]) -> None: + """Destroys a list of OpenWrtAP. + + Args: + aps: The list of OpenWrtAP to destroy. + """ + for ap in objects: + ap.close() + ap.close_ssh() + + +def get_info(objects: list[OpenWrtAP]) -> list[Json]: + """Get information on a list of access points. + + Args: + aps: A list of OpenWrtAP. + + Returns: + A list of all aps hostname. + """ + return [ap.ssh_settings.hostname for ap in objects] + + +BSSIDMap = dict[Literal["2g", "5g"], dict[str, str]] + + +class OpenWrtAP(object): + """An OpenWrtAP controller. + + Attributes: + ssh: The ssh connection to the AP. + ssh_settings: The ssh settings being used by the ssh connection. + log: Logging object for OpenWrtAP. + wireless_setting: object holding wireless configuration. + network_setting: Object for network configuration. + model: OpenWrt HW model. + radios: Fit interface for test. 
+ """ + + def __init__(self, config): + """Initialize AP.""" + self.ssh_settings = settings.from_config(config["ssh_config"]) + self.ssh = connection.SshConnection(self.ssh_settings) + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[OpenWrtAP|{self.ssh_settings.hostname}]", + }, + ) + self.wireless_setting: ( + wireless_settings_applier.WirelessSettingsApplier | None + ) = None + self.network_setting = network_settings.NetworkSettings( + self.ssh, self.ssh_settings, self.log + ) + self.model = self.get_model_name() + if self.model in modelmap.__dict__: + self.radios = modelmap.__dict__[self.model] + else: + self.radios = DEFAULT_RADIOS + + def configure_ap( + self, + wireless_configs: list[wireless_config.WirelessConfig], + channel_2g: int, + channel_5g: int, + ): + """Configure AP with the required settings. + + Each test class inherits WifiBaseTest. Based on the test, we may need to + configure PSK, WEP, OPEN, ENT networks on 2G and 5G bands in any + combination. We call WifiBaseTest methods get_psk_network(), + get_open_network(), get_wep_network() and get_ent_network() to create + dictionaries which contains this information. 'wifi_configs' is a list of + such dictionaries. Example below configures 2 WiFi networks - 1 PSK 2G and + 1 Open 5G on one AP. configure_ap() is called from WifiBaseTest to + configure the APs. + + wifi_configs = [ + { + '2g': { + 'SSID': '2g_AkqXWPK4', + 'security': 'psk2', + 'password': 'YgYuXqDO9H', + 'hiddenSSID': False + }, + }, + { + '5g': { + 'SSID': '5g_8IcMR1Sg', + 'security': 'none', + 'hiddenSSID': False + }, + } + ] + + Args: + wifi_configs: list of network settings for 2G and 5G bands. + channel_2g: channel for 2G band. + channel_5g: channel for 5G band. + """ + self.wireless_setting = wireless_settings_applier.WirelessSettingsApplier( + self.ssh, + wireless_configs, + channel_2g, + channel_5g, + self.radios[1], + self.radios[0], + ) + self.wireless_setting.apply_wireless_settings() + + def start_ap(self): + """Starts the AP with the settings in /etc/config/wireless.""" + self.ssh.run("wifi up") + curr_time = time.time() + while time.time() < curr_time + WAIT_TIME: + if self.get_wifi_status(): + return + time.sleep(3) + if not self.get_wifi_status(): + raise ValueError("Failed to turn on WiFi on the AP.") + + def stop_ap(self): + """Stops the AP.""" + self.ssh.run("wifi down") + curr_time = time.time() + while time.time() < curr_time + WAIT_TIME: + if not self.get_wifi_status(): + return + time.sleep(3) + if self.get_wifi_status(): + raise ValueError("Failed to turn off WiFi on the AP.") + + def get_bssids_for_wifi_networks(self) -> BSSIDMap: + """Get BSSIDs for wifi networks configured. + + Returns: + Dictionary of SSID - BSSID map for both bands. + """ + bssid_map: BSSIDMap = {"2g": {}, "5g": {}} + for radio in self.radios: + ssid_ifname_map = self.get_ifnames_for_ssids(radio) + if radio == self.radios[0]: + for ssid, ifname in ssid_ifname_map.items(): + bssid_map["5g"][ssid] = self.get_bssid(ifname) + elif radio == self.radios[1]: + for ssid, ifname in ssid_ifname_map.items(): + bssid_map["2g"][ssid] = self.get_bssid(ifname) + return bssid_map + + def get_ifnames_for_ssids(self, radio) -> dict[str, str]: + """Get interfaces for wifi networks. + + Args: + radio: 2g or 5g radio get the bssids from. + + Returns: + dictionary of ssid - ifname mappings. 
+ """ + ssid_ifname_map: dict[str, str] = {} + str_output = self.ssh.run(f"wifi status {radio}").stdout.decode("utf-8") + wifi_status = yaml.load( + str_output.replace("\t", "").replace("\n", ""), Loader=yaml.SafeLoader + ) + wifi_status = wifi_status[radio] + if wifi_status["up"]: + interfaces = wifi_status["interfaces"] + for config in interfaces: + ssid = config["config"]["ssid"] + ifname = config["ifname"] + ssid_ifname_map[ssid] = ifname + return ssid_ifname_map + + def get_bssid(self, ifname): + """Get MAC address from an interface. + + Args: + ifname: interface name of the corresponding MAC. + + Returns: + BSSID of the interface. + """ + ifconfig = self.ssh.run(f"ifconfig {ifname}").stdout.decode("utf-8") + mac_addr = ifconfig.split("\n")[0].split()[-1] + return mac_addr + + def set_wpa_encryption(self, encryption): + """Set different encryptions to wpa or wpa2. + + Args: + encryption: ccmp, tkip, or ccmp+tkip. + """ + str_output = self.ssh.run("wifi status").stdout.decode("utf-8") + wifi_status = yaml.load( + str_output.replace("\t", "").replace("\n", ""), Loader=yaml.SafeLoader + ) + + # Counting how many interface are enabled. + total_interface = 0 + for radio in self.radios: + num_interface = len(wifi_status[radio]["interfaces"]) + total_interface += num_interface + + # Iterates every interface to get and set wpa encryption. + default_extra_interface = 2 + for i in range(total_interface + default_extra_interface): + origin_encryption = self.ssh.run( + f"uci get wireless.@wifi-iface[{i}].encryption" + ).stdout.decode("utf-8") + origin_psk_pattern = re.match(r"psk\b", origin_encryption) + target_psk_pattern = re.match(r"psk\b", encryption) + origin_psk2_pattern = re.match(r"psk2\b", origin_encryption) + target_psk2_pattern = re.match(r"psk2\b", encryption) + + if origin_psk_pattern == target_psk_pattern: + self.ssh.run( + f"uci set wireless.@wifi-iface[{i}].encryption={encryption}" + ) + + if origin_psk2_pattern == target_psk2_pattern: + self.ssh.run( + f"uci set wireless.@wifi-iface[{i}].encryption={encryption}" + ) + + self.ssh.run("uci commit wireless") + self.ssh.run("wifi") + + def set_password(self, pwd_5g=None, pwd_2g=None): + """Set password for individual interface. + + Args: + pwd_5g: 8 ~ 63 chars, ascii letters and digits password for 5g network. + pwd_2g: 8 ~ 63 chars, ascii letters and digits password for 2g network. + """ + if pwd_5g: + if len(pwd_5g) < 8 or len(pwd_5g) > 63: + self.log.error("Password must be 8~63 characters long") + # Only accept ascii letters and digits + elif not re.match("^[A-Za-z0-9]*$", pwd_5g): + self.log.error("Password must only contains ascii letters and digits") + else: + self.ssh.run(f"uci set wireless.@wifi-iface[{3}].key={pwd_5g}") + self.log.info(f"Set 5G password to :{pwd_5g}") + + if pwd_2g: + if len(pwd_2g) < 8 or len(pwd_2g) > 63: + self.log.error("Password must be 8~63 characters long") + # Only accept ascii letters and digits + elif not re.match("^[A-Za-z0-9]*$", pwd_2g): + self.log.error("Password must only contains ascii letters and digits") + else: + self.ssh.run(f"uci set wireless.@wifi-iface[{2}].key={pwd_2g}") + self.log.info(f"Set 2G password to :{pwd_2g}") + + self.ssh.run("uci commit wireless") + self.ssh.run("wifi") + + def set_ssid(self, ssid_5g=None, ssid_2g=None): + """Set SSID for individual interface. + + Args: + ssid_5g: 8 ~ 63 chars for 5g network. + ssid_2g: 8 ~ 63 chars for 2g network. 
+ """ + if ssid_5g: + if len(ssid_5g) < 8 or len(ssid_5g) > 63: + self.log.error("SSID must be 8~63 characters long") + # Only accept ascii letters and digits + else: + self.ssh.run(f"uci set wireless.@wifi-iface[{3}].ssid={ssid_5g}") + self.log.info(f"Set 5G SSID to :{ssid_5g}") + + if ssid_2g: + if len(ssid_2g) < 8 or len(ssid_2g) > 63: + self.log.error("SSID must be 8~63 characters long") + # Only accept ascii letters and digits + else: + self.ssh.run(f"uci set wireless.@wifi-iface[{2}].ssid={ssid_2g}") + self.log.info(f"Set 2G SSID to :{ssid_2g}") + + self.ssh.run("uci commit wireless") + self.ssh.run("wifi") + + def generate_mobility_domain(self): + """Generate 4-character hexadecimal ID. + + Returns: + String; a 4-character hexadecimal ID. + """ + md = f"{random.getrandbits(16):04x}" + self.log.info(f"Mobility Domain ID: {md}") + return md + + def enable_80211r(self, iface, md): + """Enable 802.11r for one single radio. + + Args: + iface: index number of wifi-iface. + 2: radio1 + 3: radio0 + md: mobility domain. a 4-character hexadecimal ID. + Raises: + TestSkip if 2g or 5g radio is not up or 802.11r is not enabled. + """ + str_output = self.ssh.run("wifi status").stdout.decode("utf-8") + wifi_status = yaml.load( + str_output.replace("\t", "").replace("\n", ""), Loader=yaml.SafeLoader + ) + # Check if the radio is up. + if iface == OpenWrtWifiSetting.IFACE_2G: + if wifi_status[self.radios[1]]["up"]: + self.log.info("2g network is ENABLED") + else: + raise signals.TestSkip("2g network is NOT ENABLED") + elif iface == OpenWrtWifiSetting.IFACE_5G: + if wifi_status[self.radios[0]]["up"]: + self.log.info("5g network is ENABLED") + else: + raise signals.TestSkip("5g network is NOT ENABLED") + + # Setup 802.11r. + self.ssh.run(f"uci set wireless.@wifi-iface[{iface}].ieee80211r='1'") + self.ssh.run(f"uci set wireless.@wifi-iface[{iface}].ft_psk_generate_local='1'") + self.ssh.run(f"uci set wireless.@wifi-iface[{iface}].mobility_domain='{md}'") + self.ssh.run("uci commit wireless") + self.ssh.run("wifi") + + # Check if 802.11r is enabled. + result = self.ssh.run( + f"uci get wireless.@wifi-iface[{iface}].ieee80211r" + ).stdout.decode("utf-8") + if result == "1": + self.log.info("802.11r is ENABLED") + else: + raise signals.TestSkip("802.11r is NOT ENABLED") + + def get_wifi_network(self, security=None, band=None): + """Return first match wifi interface's config. + + Args: + security: psk2 or none + band: '2g' or '5g' + + Returns: + A dict contains match wifi interface's config. + """ + if not self.wireless_setting: + raise RuntimeError("The AP has not been configured yet; run configure_ap()") + + for wifi_iface in self.wireless_setting.wireless_configs: + match_list = [] + wifi_network = wifi_iface.__dict__ + if security: + match_list.append(security == wifi_network["security"]) + if band: + match_list.append(band == wifi_network["band"]) + + if all(match_list): + wifi_network["SSID"] = wifi_network["ssid"] + if not wifi_network["password"]: + del wifi_network["password"] + return wifi_network + return None + + def get_wifi_status(self): + """Check if radios are up. Default are 2G and 5G bands. + + Returns: + True if both radios are up. False if not. 
+ """ + status = True + for radio in self.radios: + try: + str_output = self.ssh.run(f"wifi status {radio}").stdout.decode("utf-8") + wifi_status = yaml.load( + str_output.replace("\t", "").replace("\n", ""), + Loader=yaml.SafeLoader, + ) + status = wifi_status[radio]["up"] and status + except: + self.log.info("Failed to make ssh connection to the OpenWrt") + return False + return status + + def verify_wifi_status(self, timeout=20): + """Ensure wifi interfaces are ready. + + Args: + timeout: An integer that is the number of times to try + wait for interface ready. + Returns: + True if both radios are up. False if not. + """ + start_time = time.time() + end_time = start_time + timeout + while time.time() < end_time: + if self.get_wifi_status(): + return True + time.sleep(1) + return False + + def get_model_name(self): + """Get Openwrt model name. + + Returns: + A string include device brand and model. e.g. NETGEAR_R8000 + """ + out = self.ssh.run(SYSTEM_INFO_CMD).stdout.decode("utf-8").split("\n") + for line in out: + if "board_name" in line: + model = line.split()[1].strip('",').split(",") + return "_".join(map(lambda i: i.upper(), model)) + self.log.info("Failed to retrieve OpenWrt model information.") + return None + + def close(self): + """Reset wireless and network settings to default and stop AP.""" + if self.network_setting.config: + self.network_setting.cleanup_network_settings() + if self.wireless_setting: + self.wireless_setting.cleanup_wireless_settings() + + def close_ssh(self): + """Close SSH connection to AP.""" + self.ssh.close() + + def reboot(self): + """Reboot Openwrt.""" + self.ssh.run("reboot")
diff --git a/src/antlion/controllers/openwrt_lib/OWNERS b/packages/antlion/controllers/openwrt_lib/OWNERS similarity index 100% rename from src/antlion/controllers/openwrt_lib/OWNERS rename to packages/antlion/controllers/openwrt_lib/OWNERS
diff --git a/src/antlion/controllers/openwrt_lib/__init__.py b/packages/antlion/controllers/openwrt_lib/__init__.py similarity index 100% rename from src/antlion/controllers/openwrt_lib/__init__.py rename to packages/antlion/controllers/openwrt_lib/__init__.py
diff --git a/src/antlion/controllers/openwrt_lib/network_const.py b/packages/antlion/controllers/openwrt_lib/network_const.py similarity index 99% rename from src/antlion/controllers/openwrt_lib/network_const.py rename to packages/antlion/controllers/openwrt_lib/network_const.py index 3b05b83..7375ff7 100644 --- a/src/antlion/controllers/openwrt_lib/network_const.py +++ b/packages/antlion/controllers/openwrt_lib/network_const.py
@@ -127,7 +127,7 @@ "leftauth": "pubkey", "leftsendcert": "always", "right": "%any", - "rightid": "vpntest@%s" % LOCALHOST, + "rightid": f"vpntest@{LOCALHOST}", "rightauth": "pubkey", "rightcert": "clientCert.pem", "auto": "add",
diff --git a/src/antlion/controllers/openwrt_lib/network_settings.py b/packages/antlion/controllers/openwrt_lib/network_settings.py similarity index 81% rename from src/antlion/controllers/openwrt_lib/network_settings.py rename to packages/antlion/controllers/openwrt_lib/network_settings.py index 5d14360..8a8494c 100644 --- a/src/antlion/controllers/openwrt_lib/network_settings.py +++ b/packages/antlion/controllers/openwrt_lib/network_settings.py
@@ -15,7 +15,8 @@ import re import time -from antlion import signals +from mobly import signals + from antlion import utils from antlion.controllers.openwrt_lib import network_const @@ -59,7 +60,6 @@ log: Logging object for AccessPoint. config: A list to store changes on network settings. firewall_rules_list: A list of firewall rule name list. - cleanup_map: A dict for compare oppo functions. l2tp: profile for vpn l2tp server. """ @@ -79,20 +79,6 @@ self.log = logger self.config = set() self.firewall_rules_list = [] - self.cleanup_map = { - "setup_dns_server": self.remove_dns_server, - "setup_vpn_pptp_server": self.remove_vpn_pptp_server, - "setup_vpn_l2tp_server": self.remove_vpn_l2tp_server, - "disable_ipv6": self.enable_ipv6, - "setup_ipv6_bridge": self.remove_ipv6_bridge, - "default_dns": self.del_default_dns, - "default_v6_dns": self.del_default_v6_dns, - "ipv6_prefer_option": self.remove_ipv6_prefer_option, - "block_dns_response": self.unblock_dns_response, - "setup_mdns": self.remove_mdns, - "add_dhcp_rapid_commit": self.remove_dhcp_rapid_commit, - "setup_captive_portal": self.remove_cpative_portal, - } # This map contains cleanup functions to restore the configuration to # its default state. We write these keys to HISTORY_CONFIG_PATH prior to # making any changes to that subsystem. @@ -106,7 +92,7 @@ # Detect if any changes that is not clean up. if self.file_exists(HISTORY_CONFIG_PATH): - out = self.ssh.run("cat %s" % HISTORY_CONFIG_PATH).stdout + out = self.ssh.run(f"cat {HISTORY_CONFIG_PATH}").stdout if out: self.config = set(out.split("\n")) @@ -114,16 +100,48 @@ temp = self.config.copy() for change in temp: change_list = change.split() - if len(change_list) > 1: - self.cleanup_map[change_list[0]](*change_list[1:]) + + command = change_list[0] + args = change_list[1:] + if command == "setup_dns_server": + self.remove_dns_server() + elif command == "setup_vpn_pptp_server": + self.remove_vpn_pptp_server() + elif command == "setup_vpn_l2tp_server": + self.remove_vpn_l2tp_server() + elif command == "disable_ipv6": + self.enable_ipv6() + elif command == "setup_ipv6_bridge": + self.remove_ipv6_bridge() + elif command == "default_dns": + addr_list = str(change_list[1]) + self.del_default_dns(addr_list) + elif command == "default_v6_dns": + addr_list = str(change_list[1]) + self.del_default_v6_dns(addr_list) + elif command == "ipv6_prefer_option": + self.remove_ipv6_prefer_option() + elif command == "block_dns_response": + self.unblock_dns_response() + elif command == "setup_mdns": + self.remove_mdns() + elif command == "add_dhcp_rapid_commit": + self.remove_dhcp_rapid_commit() + elif command == "setup_captive_portal": + try: + fas_port = int(change_list[1]) + except IndexError: + fas_port = 1000 + self.remove_cpative_portal(fas_port) else: - self.cleanup_map[change]() + raise TypeError(f'Unknown command "{change}"') + self.config = set() if self.file_exists(HISTORY_CONFIG_PATH): - out = self.ssh.run("cat %s" % HISTORY_CONFIG_PATH).stdout + out = self.ssh.run(f"cat {HISTORY_CONFIG_PATH}").stdout if not out: - self.ssh.run("rm %s" % HISTORY_CONFIG_PATH) + self.ssh.run(f"rm {HISTORY_CONFIG_PATH}") def commit_changes(self): """Apply changes on Access point.""" @@ -142,12 +160,12 @@ for package_name in package_list.split(" "): if not self._package_installed(package_name): self.ssh.run( - "opkg install %s" % package_name, + f"opkg install {package_name}", timeout=DEFAULT_PACKAGE_INSTALL_TIMEOUT, ) - self.log.info("Package: %s installed." 
% package_name) + self.log.info(f"Package: {package_name} installed.") else: - self.log.info("Package: %s skipped (already installed)." % package_name) + self.log.info(f"Package: {package_name} skipped (already installed).") def package_remove(self, package_list): """Remove packages on OpenWrtAP via opkg If existed. @@ -157,10 +175,10 @@ """ for package_name in package_list.split(" "): if self._package_installed(package_name): - self.ssh.run("opkg remove %s" % package_name) - self.log.info("Package: %s removed." % package_name) + self.ssh.run(f"opkg remove {package_name}") + self.log.info(f"Package: {package_name} removed.") else: - self.log.info("No exist package %s found." % package_name) + self.log.info(f"No exist package {package_name} found.") def _package_installed(self, package_name): """Check if target package installed on OpenWrtAP. @@ -171,7 +189,7 @@ Returns: True if installed. """ - if self.ssh.run("opkg list-installed %s" % package_name).stdout: + if self.ssh.run(f"opkg list-installed {package_name}").stdout: return True return False @@ -185,9 +203,7 @@ True if Existed. """ path, file_name = abs_file_path.rsplit("/", 1) - if self.ssh.run( - "ls %s | grep %s" % (path, file_name), ignore_status=True - ).stdout: + if self.ssh.run(f"ls {path} | grep {file_name}", ignore_status=True).stdout: return True return False @@ -198,7 +214,7 @@ abs_path: absolutely path for create folder. """ try: - self.ssh.run("ls %s" % abs_path) + self.ssh.run(f"ls {abs_path}") except: return False return True @@ -210,9 +226,9 @@ abs_path: absolutely path for create folder. """ if not self.path_exists(abs_path): - self.ssh.run("mkdir %s" % abs_path) + self.ssh.run(f"mkdir {abs_path}") else: - self.log.info("%s already existed." % abs_path) + self.log.info(f"{abs_path} already existed.") def count(self, config, key): """Count in uci config. @@ -224,7 +240,7 @@ Numbers of the count. """ count = self.ssh.run( - "uci show %s | grep =%s" % (config, key), ignore_status=True + f"uci show {config} | grep ={key}", ignore_status=True ).stdout return len(count.split("\n")) @@ -235,7 +251,7 @@ config: A string of content of config. file_path: Config's abs_path. """ - self.ssh.run('echo -e "%s" > %s' % (config, file_path)) + self.ssh.run(f'echo -e "{config}" > {file_path}') def replace_config_option(self, old_option, new_option, file_path): """Replace config option if pattern match. @@ -248,10 +264,10 @@ new_option: the option to add. file_path: Config's abs_path. """ - config = self.ssh.run("cat %s" % file_path).stdout + config = self.ssh.run(f"cat {file_path}").stdout config, count = re.subn(old_option, new_option, config) if not count: - config = "\n".join([config, new_option]) + config = f"{config}\n{new_option}" self.create_config_file(config, file_path) def remove_config_option(self, option, file_path): @@ -263,7 +279,7 @@ Returns: Boolean for find option to remove. """ - config = self.ssh.run("cat %s" % file_path).stdout.split("\n") + config = self.ssh.run(f"cat {file_path}").stdout.split("\n") for line in config: count = re.subn(option, "", line)[1] if count > 0: @@ -280,9 +296,9 @@ domain_name: Local dns domain name. 
""" self.config.add("setup_dns_server") - self.log.info("Setup DNS server with domain name %s" % domain_name) - self.ssh.run("uci set dhcp.@dnsmasq[0].local='/%s/'" % domain_name) - self.ssh.run("uci set dhcp.@dnsmasq[0].domain='%s'" % domain_name) + self.log.info(f"Setup DNS server with domain name {domain_name}") + self.ssh.run(f"uci set dhcp.@dnsmasq[0].local='/{domain_name}/'") + self.ssh.run(f"uci set dhcp.@dnsmasq[0].domain='{domain_name}'") self.add_resource_record(domain_name, self.ip) self.service_manager.need_restart(SERVICE_DNSMASQ) self.commit_changes() @@ -315,8 +331,8 @@ domain_ip: A string for domain ip. """ self.ssh.run("uci add dhcp domain") - self.ssh.run("uci set dhcp.@domain[-1].name='%s'" % domain_name) - self.ssh.run("uci set dhcp.@domain[-1].ip='%s'" % domain_ip) + self.ssh.run(f"uci set dhcp.@domain[-1].name='{domain_name}'") + self.ssh.run(f"uci set dhcp.@domain[-1].ip='{domain_ip}'") self.service_manager.need_restart(SERVICE_DNSMASQ) def del_resource_record(self): @@ -401,16 +417,16 @@ remote_ip = ".".join(remote_ip) # Enable pptp service and set ip addr self.ssh.run("uci set pptpd.pptpd.enabled=1") - self.ssh.run("uci set pptpd.pptpd.localip='%s'" % local_ip) - self.ssh.run("uci set pptpd.pptpd.remoteip='%s-250'" % remote_ip) + self.ssh.run(f"uci set pptpd.pptpd.localip='{local_ip}'") + self.ssh.run(f"uci set pptpd.pptpd.remoteip='{remote_ip}-250'") # Setup pptp service account - self.ssh.run("uci set pptpd.@login[0].username='%s'" % username) - self.ssh.run("uci set pptpd.@login[0].password='%s'" % password) + self.ssh.run(f"uci set pptpd.@login[0].username='{username}'") + self.ssh.run(f"uci set pptpd.@login[0].password='{password}'") self.service_manager.need_restart(SERVICE_PPTPD) self.replace_config_option( - r"#*ms-dns \d+.\d+.\d+.\d+", "ms-dns %s" % ms_dns, PPTPD_OPTION_PATH + r"#*ms-dns \d+.\d+.\d+.\d+", f"ms-dns {ms_dns}", PPTPD_OPTION_PATH ) self.replace_config_option("(#no)*proxyarp", "proxyarp", PPTPD_OPTION_PATH) @@ -502,7 +518,7 @@ " plugins {", " include strongswan.d/charon/*.conf", " }", - " dns1=%s" % dns, + f" dns1={dns}", "}", ] self.create_config_file("\n".join(config), "/etc/strongswan.conf") @@ -510,19 +526,19 @@ def setup_ipsec(self): """Setup ipsec config.""" + config: list[str] = [] + def load_ipsec_config(data, rightsourceip=False): for i in data.keys(): config.append(i) for j in data[i].keys(): - config.append("\t %s=%s" % (j, data[i][j])) + config.append(f"\t {j}={data[i][j]}") if rightsourceip: config.append( - "\t rightsourceip=%s.16/26" - % self.l2tp.address.rsplit(".", 1)[0] + f"\t rightsourceip={self.l2tp.address.rsplit('.', 1)[0]}.16/26" ) config.append("") - config = [] load_ipsec_config(network_const.IPSEC_IKEV2_MSCHAPV2, True) load_ipsec_config(network_const.IPSEC_IKEV2_PSK, True) load_ipsec_config(network_const.IPSEC_IKEV2_RSA, True) @@ -549,25 +565,24 @@ """Setup xl2tpd config.""" net_id, host_id = self.l2tp.address.rsplit(".", 1) xl2tpd_conf = list(network_const.XL2TPD_CONF_GLOBAL) - xl2tpd_conf.append("auth file = %s" % PPP_CHAP_SECRET_PATH) + xl2tpd_conf.append(f"auth file = {PPP_CHAP_SECRET_PATH}") xl2tpd_conf.extend(network_const.XL2TPD_CONF_INS) xl2tpd_conf.append( - "ip range = %s.%s-%s.%s" - % (net_id, host_id, net_id, str(int(host_id) + ip_range)) + f"ip range = {net_id}.{host_id}-{net_id}.{str(int(host_id) + ip_range)}" ) - xl2tpd_conf.append("local ip = %s" % self.l2tp.address) - xl2tpd_conf.append("name = %s" % self.l2tp.name) - xl2tpd_conf.append("pppoptfile = %s" % XL2TPD_OPTION_CONFIG_PATH) + 
xl2tpd_conf.append(f"local ip = {self.l2tp.address}") + xl2tpd_conf.append(f"name = {self.l2tp.name}") + xl2tpd_conf.append(f"pppoptfile = {XL2TPD_OPTION_CONFIG_PATH}") self.create_config_file("\n".join(xl2tpd_conf), XL2TPD_CONFIG_PATH) xl2tpd_option = list(network_const.XL2TPD_OPTION) - xl2tpd_option.append("name %s" % self.l2tp.name) + xl2tpd_option.append(f"name {self.l2tp.name}") self.create_config_file("\n".join(xl2tpd_option), XL2TPD_OPTION_CONFIG_PATH) def setup_ppp_secret(self): self.replace_config_option( r"\S+ %s \S+ \*" % self.l2tp.name, - "%s %s %s *" % (self.l2tp.username, self.l2tp.name, self.l2tp.password), + f"{self.l2tp.username} {self.l2tp.name} {self.l2tp.password} *", PPP_CHAP_SECRET_PATH, ) @@ -577,15 +592,13 @@ lifetime = "--lifetime 365" size = "--size 4096" - self.ssh.run("ipsec pki --gen %s %s --outform der > caKey.der" % (rsa, size)) + self.ssh.run(f"ipsec pki --gen {rsa} {size} --outform der > caKey.der") self.ssh.run( "ipsec pki --self --ca %s --in caKey.der %s --dn " '"C=%s, O=%s, CN=%s" --outform der > caCert.der' % (lifetime, rsa, country, org, self.l2tp.hostname) ) - self.ssh.run( - "ipsec pki --gen %s %s --outform der > serverKey.der" % (size, rsa) - ) + self.ssh.run(f"ipsec pki --gen {size} {rsa} --outform der > serverKey.der") self.ssh.run( "ipsec pki --pub --in serverKey.der %s | ipsec pki " "--issue %s --cacert caCert.der --cakey caKey.der " @@ -593,9 +606,7 @@ " --flag ikeIntermediate --outform der > serverCert.der" % (rsa, lifetime, country, org, self.l2tp.hostname, LOCALHOST) ) - self.ssh.run( - "ipsec pki --gen %s %s --outform der > clientKey.der" % (size, rsa) - ) + self.ssh.run(f"ipsec pki --gen {size} {rsa} --outform der > clientKey.der") self.ssh.run( "ipsec pki --pub --in clientKey.der %s | ipsec pki " "--issue %s --cacert caCert.der --cakey caKey.der " @@ -637,11 +648,11 @@ self.ssh.run("mkdir /www/downloads/") ikev2_vpn_cert_keys = [ - "ipsec pki --gen %s %s --outform der > caKey.der" % (rsa, size), + f"ipsec pki --gen {rsa} {size} --outform der > caKey.der", "ipsec pki --self --ca %s --in caKey.der %s --dn " '"C=%s, O=%s, CN=%s" --outform der > caCert.der' % (lifetime, rsa, country, org, self.l2tp.hostname), - "ipsec pki --gen %s %s --outform der > serverKey.der" % (size, rsa), + f"ipsec pki --gen {size} {rsa} --outform der > serverKey.der", "ipsec pki --pub --in serverKey.der %s | ipsec pki --issue %s " r"--cacert caCert.der --cakey caKey.der --dn \"C=%s, O=%s, CN=%s\" " "--san %s --san %s --flag serverAuth --flag ikeIntermediate " @@ -655,7 +666,7 @@ LOCALHOST, self.l2tp.hostname, ), - "ipsec pki --gen %s %s --outform der > clientKey.der" % (size, rsa), + f"ipsec pki --gen {size} {rsa} --outform der > clientKey.der", "ipsec pki --pub --in clientKey.der %s | ipsec pki --issue %s " r"--cacert caCert.der --cakey caKey.der --dn \"C=%s, O=%s, CN=%s@%s\" " r"--san \"%s\" --san \"%s@%s\" --san \"%s@%s\" --outform der " @@ -689,14 +700,14 @@ file_string = "\n".join(ikev2_vpn_cert_keys) self.create_config_file(file_string, IKEV2_VPN_CERT_KEYS_PATH) - self.ssh.run("chmod +x %s" % IKEV2_VPN_CERT_KEYS_PATH) - self.ssh.run("%s" % IKEV2_VPN_CERT_KEYS_PATH) + self.ssh.run(f"chmod +x {IKEV2_VPN_CERT_KEYS_PATH}") + self.ssh.run(f"{IKEV2_VPN_CERT_KEYS_PATH}") def update_firewall_rules_list(self): """Update rule list in /etc/config/firewall.""" new_rules_list = [] for i in range(self.count("firewall", "rule")): - rule = self.ssh.run("uci get firewall.@rule[%s].name" % i).stdout + rule = self.ssh.run(f"uci get firewall.@rule[{i}].name").stdout 
new_rules_list.append(rule) self.firewall_rules_list = new_rules_list @@ -728,12 +739,12 @@ self.update_firewall_rules_list() if "pptpd" in self.firewall_rules_list: self.ssh.run( - "uci del firewall.@rule[%s]" % self.firewall_rules_list.index("pptpd") + f"uci del firewall.@rule[{self.firewall_rules_list.index('pptpd')}]" ) self.update_firewall_rules_list() if "GRP" in self.firewall_rules_list: self.ssh.run( - "uci del firewall.@rule[%s]" % self.firewall_rules_list.index("GRP") + f"uci del firewall.@rule[{self.firewall_rules_list.index('GRP')}]" ) self.remove_custom_firewall_rules() self.service_manager.need_restart(SERVICE_FIREWALL) @@ -765,10 +776,9 @@ net_id = self.l2tp.address.rsplit(".", 1)[0] iptable_rules = list(network_const.FIREWALL_RULES_FOR_L2TP) - iptable_rules.append("iptables -A FORWARD -s %s.0/24" " -j ACCEPT" % net_id) + iptable_rules.append(f"iptables -A FORWARD -s {net_id}.0/24 -j ACCEPT") iptable_rules.append( - "iptables -t nat -A POSTROUTING" - " -s %s.0/24 -o eth0.2 -j MASQUERADE" % net_id + f"iptables -t nat -A POSTROUTING -s {net_id}.0/24 -o eth0.2 -j MASQUERADE" ) self.add_custom_firewall_rules(iptable_rules) @@ -779,8 +789,7 @@ self.update_firewall_rules_list() if "ipsec esp" in self.firewall_rules_list: self.ssh.run( - "uci del firewall.@rule[%s]" - % self.firewall_rules_list.index("ipsec esp") + f"uci del firewall.@rule[{self.firewall_rules_list.index('ipsec esp')}]" ) self.update_firewall_rules_list() if "ipsec nat-t" in self.firewall_rules_list: @@ -803,20 +812,20 @@ Args: rules: A list of iptable rules to apply. """ - backup_file_path = FIREWALL_CUSTOM_OPTION_PATH + ".backup" + backup_file_path = f"{FIREWALL_CUSTOM_OPTION_PATH}.backup" if not self.file_exists(backup_file_path): - self.ssh.run("mv %s %s" % (FIREWALL_CUSTOM_OPTION_PATH, backup_file_path)) + self.ssh.run(f"mv {FIREWALL_CUSTOM_OPTION_PATH} {backup_file_path}") for rule in rules: - self.ssh.run("echo %s >> %s" % (rule, FIREWALL_CUSTOM_OPTION_PATH)) + self.ssh.run(f"echo {rule} >> {FIREWALL_CUSTOM_OPTION_PATH}") def remove_custom_firewall_rules(self): """Clean up and recover custom firewall rules.""" - backup_file_path = FIREWALL_CUSTOM_OPTION_PATH + ".backup" + backup_file_path = f"{FIREWALL_CUSTOM_OPTION_PATH}.backup" if self.file_exists(backup_file_path): - self.ssh.run("mv %s %s" % (backup_file_path, FIREWALL_CUSTOM_OPTION_PATH)) + self.ssh.run(f"mv {backup_file_path} {FIREWALL_CUSTOM_OPTION_PATH}") else: - self.log.debug("Did not find %s" % backup_file_path) - self.ssh.run("echo " " > %s" % FIREWALL_CUSTOM_OPTION_PATH) + self.log.debug(f"Did not find {backup_file_path}") + self.ssh.run(f"echo > {FIREWALL_CUSTOM_OPTION_PATH}") def disable_pptp_service(self): """Disable pptp service.""" @@ -828,7 +837,7 @@ self.ssh.run("uci set network.lan2.type=bridge") self.ssh.run("uci set network.lan2.ifname=eth1.2") self.ssh.run("uci set network.lan2.proto=static") - self.ssh.run('uci set network.lan2.ipaddr="%s"' % self.l2tp.address) + self.ssh.run(f'uci set network.lan2.ipaddr="{self.l2tp.address}"') self.ssh.run("uci set network.lan2.netmask=255.255.255.0") self.ssh.run("uci set network.lan2=interface") self.service_manager.reload(SERVICE_NETWORK) @@ -892,10 +901,10 @@ self.commit_changes() def _add_dhcp_option(self, args): - self.ssh.run('uci add_list dhcp.lan.dhcp_option="%s"' % args) + self.ssh.run(f'uci add_list dhcp.lan.dhcp_option="{args}"') def _remove_dhcp_option(self, args): - self.ssh.run('uci del_list dhcp.lan.dhcp_option="%s"' % args) + self.ssh.run(f'uci del_list 
dhcp.lan.dhcp_option="{args}"') def add_default_dns(self, addr_list): """Add default dns server for client. @@ -903,41 +912,41 @@ Args: addr_list: dns ip address for Openwrt client. """ - self._add_dhcp_option("6,%s" % ",".join(addr_list)) - self.config.add("default_dns %s" % addr_list) + self._add_dhcp_option(f'6,{",".join(addr_list)}') + self.config.add(f"default_dns {addr_list}") self.service_manager.need_restart(SERVICE_DNSMASQ) self.commit_changes() - def del_default_dns(self, addr_list): + def del_default_dns(self, addr_list: str): """Remove default dns server for client. Args: addr_list: list of dns ip address for Openwrt client. """ - self._remove_dhcp_option("6,%s" % addr_list) - self.config.discard("default_dns %s" % addr_list) + self._remove_dhcp_option(f"6,{addr_list}") + self.config.discard(f"default_dns {addr_list}") self.service_manager.need_restart(SERVICE_DNSMASQ) self.commit_changes() - def add_default_v6_dns(self, addr_list): + def add_default_v6_dns(self, addr_list: str): """Add default v6 dns server for client. Args: - addr_list: dns ip address for Openwrt client. + addr_list: list of dns ip address for Openwrt client. """ - self.ssh.run('uci add_list dhcp.lan.dns="%s"' % addr_list) - self.config.add("default_v6_dns %s" % addr_list) + self.ssh.run(f'uci add_list dhcp.lan.dns="{addr_list}"') + self.config.add(f"default_v6_dns {addr_list}") self.service_manager.need_restart(SERVICE_ODHCPD) self.commit_changes() - def del_default_v6_dns(self, addr_list): + def del_default_v6_dns(self, addr_list: str): """Del default v6 dns server for client. Args: - addr_list: dns ip address for Openwrt client. + addr_list: list of dns ip address for Openwrt client. """ - self.ssh.run('uci del_list dhcp.lan.dns="%s"' % addr_list) - self.config.add("default_v6_dns %s" % addr_list) + self.ssh.run(f'uci del_list dhcp.lan.dns="{addr_list}"') + self.config.add(f"default_v6_dns {addr_list}") self.service_manager.need_restart(SERVICE_ODHCPD) self.commit_changes() @@ -978,13 +987,11 @@ """ self.package_install("tcpdump") if not self.path_exists(TCPDUMP_DIR): - self.ssh.run("mkdir %s" % TCPDUMP_DIR) - tcpdump_file_name = "openwrt_%s_%s.pcap" % ( - test_name, - time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time())), - ) - tcpdump_file_path = "".join([TCPDUMP_DIR, tcpdump_file_name]) - cmd = "tcpdump -i %s -s0 %s -w %s" % (interface, args, tcpdump_file_path) + self.ssh.run(f"mkdir {TCPDUMP_DIR}") + now = (time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time())),) + tcpdump_file_name = f"openwrt_{test_name}_{now}.pcap" + tcpdump_file_path = f"{TCPDUMP_DIR}{tcpdump_file_name}" + cmd = f"tcpdump -i {interface} -s0 {args} -w {tcpdump_file_path}" self.ssh.run_async(cmd) pid = self._get_tcpdump_pid(tcpdump_file_name) if not pid: @@ -1005,17 +1012,17 @@ # Set delay to prevent tcpdump fail to capture target packet. 
time.sleep(15) pid = self._get_tcpdump_pid(tcpdump_file_name) - self.ssh.run("kill -9 %s" % pid, ignore_status=True) + self.ssh.run(f"kill -9 {pid}", ignore_status=True) if self.path_exists(TCPDUMP_DIR) and pull_dir: - tcpdump_path = "".join([TCPDUMP_DIR, tcpdump_file_name]) - tcpdump_remote_path = "/".join([pull_dir, tcpdump_file_name]) - tcpdump_local_path = "%s@%s:%s" % (self.user, self.ip, tcpdump_path) - utils.exe_cmd("scp %s %s" % (tcpdump_local_path, tcpdump_remote_path)) + tcpdump_path = f"{TCPDUMP_DIR}{tcpdump_file_name}" + tcpdump_remote_path = f"{pull_dir}/{tcpdump_file_name}" + tcpdump_local_path = f"{self.user}@{self.ip}:{tcpdump_path}" + utils.exe_cmd(f"scp {tcpdump_local_path} {tcpdump_remote_path}") if self._get_tcpdump_pid(tcpdump_file_name): raise signals.TestFailure("Failed to stop tcpdump on OpenWrt.") if self.file_exists(tcpdump_path): - self.ssh.run("rm -f %s" % tcpdump_path) + self.ssh.run(f"rm -f {tcpdump_path}") return tcpdump_remote_path if pull_dir else None def clear_tcpdump(self): @@ -1023,13 +1030,11 @@ if self.ssh.run("pgrep tcpdump", ignore_status=True).stdout: raise signals.TestFailure("Failed to clean up tcpdump process.") if self.path_exists(TCPDUMP_DIR): - self.ssh.run("rm -f %s/*" % TCPDUMP_DIR) + self.ssh.run(f"rm -f {TCPDUMP_DIR}/*") def _get_tcpdump_pid(self, tcpdump_file_name): """Check tcpdump process on OpenWrt.""" - return self.ssh.run( - "pgrep -f %s" % (tcpdump_file_name), ignore_status=True - ).stdout + return self.ssh.run(f"pgrep -f {tcpdump_file_name}", ignore_status=True).stdout def setup_mdns(self): self.config.add("setup_mdns") @@ -1062,18 +1067,18 @@ fas_port: Port for captive portal page. """ self.package_install(CAPTIVE_PORTAL_PACKAGE) - self.config.add("setup_captive_portal %s" % fas_port) + self.config.add(f"setup_captive_portal {fas_port}") self.ssh.run("uci set opennds.@opennds[0].fas_secure_enabled=2") self.ssh.run("uci set opennds.@opennds[0].gatewayport=2050") - self.ssh.run("uci set opennds.@opennds[0].fasport=%s" % fas_port) - self.ssh.run("uci set opennds.@opennds[0].fasremotefqdn=%s" % fas_fdqn) + self.ssh.run(f"uci set opennds.@opennds[0].fasport={fas_port}") + self.ssh.run(f"uci set opennds.@opennds[0].fasremotefqdn={fas_fdqn}") self.ssh.run('uci set opennds.@opennds[0].faspath="/nds/fas-aes.php"') self.ssh.run("uci set opennds.@opennds[0].faskey=1234567890") self.service_manager.need_restart(SERVICE_OPENNDS) # Config uhttpd self.ssh.run("uci set uhttpd.main.interpreter=.php=/usr/bin/php-cgi") - self.ssh.run("uci add_list uhttpd.main.listen_http=0.0.0.0:%s" % fas_port) - self.ssh.run("uci add_list uhttpd.main.listen_http=[::]:%s" % fas_port) + self.ssh.run(f"uci add_list uhttpd.main.listen_http=0.0.0.0:{fas_port}") + self.ssh.run(f"uci add_list uhttpd.main.listen_http=[::]:{fas_port}") self.service_manager.need_restart(SERVICE_UHTTPD) # cp fas-aes.php self.create_folder("/www/nds/") @@ -1082,7 +1087,7 @@ self.add_resource_record(fas_fdqn, LOCALHOST) self.commit_changes() - def remove_cpative_portal(self, fas_port=2080): + def remove_cpative_portal(self, fas_port: int = 2080): """Remove captive portal. 
Args: @@ -1096,12 +1101,12 @@ self.clear_resource_record() # Restore uhttpd self.ssh.run("uci del uhttpd.main.interpreter") - self.ssh.run("uci del_list uhttpd.main.listen_http='0.0.0.0:%s'" % fas_port) - self.ssh.run("uci del_list uhttpd.main.listen_http='[::]:%s'" % fas_port) + self.ssh.run(f"uci del_list uhttpd.main.listen_http='0.0.0.0:{fas_port}'") + self.ssh.run(f"uci del_list uhttpd.main.listen_http='[::]:{fas_port}'") self.service_manager.need_restart(SERVICE_UHTTPD) # Clean web root self.ssh.run("rm -r /www/nds") - self.config.discard("setup_captive_portal %s" % fas_port) + self.config.discard(f"setup_captive_portal {fas_port}") self.commit_changes() @@ -1119,19 +1124,19 @@ def enable(self, service_name): """Enable service auto start.""" - self.ssh.run("/etc/init.d/%s enable" % service_name) + self.ssh.run(f"/etc/init.d/{service_name} enable") def disable(self, service_name): """Disable service auto start.""" - self.ssh.run("/etc/init.d/%s disable" % service_name) + self.ssh.run(f"/etc/init.d/{service_name} disable") def restart(self, service_name): """Restart the service.""" - self.ssh.run("/etc/init.d/%s restart" % service_name) + self.ssh.run(f"/etc/init.d/{service_name} restart") def reload(self, service_name): """Restart the service.""" - self.ssh.run("/etc/init.d/%s reload" % service_name) + self.ssh.run(f"/etc/init.d/{service_name} reload") def restart_services(self): """Restart all services need to restart.""" @@ -1143,7 +1148,7 @@ def stop(self, service_name): """Stop the service.""" - self.ssh.run("/etc/init.d/%s stop" % service_name) + self.ssh.run(f"/etc/init.d/{service_name} stop") def need_restart(self, service_name): self._need_restart.add(service_name)
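For reference, the queue-then-flush pattern that the methods above rely on can be exercised directly: need_restart() records a service and restart_services() restarts everything queued. A minimal sketch, assuming `ssh` is an already-established SSH connection to the OpenWrt device:

from antlion.controllers.openwrt_lib.network_settings import (
    SERVICE_DNSMASQ,
    ServiceManager,
)


def flush_dnsmasq_restart(ssh) -> None:
    # Sketch only: `ssh` is assumed to be the same connection object the
    # OpenWrt controller passes to ServiceManager elsewhere in this module.
    manager = ServiceManager(ssh)
    # Queue the restart instead of restarting immediately, so several UCI
    # edits can be flushed with a single restart_services() call.
    manager.need_restart(SERVICE_DNSMASQ)
    manager.restart_services()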
diff --git a/src/antlion/controllers/openwrt_lib/openwrt_constants.py b/packages/antlion/controllers/openwrt_lib/openwrt_constants.py similarity index 100% rename from src/antlion/controllers/openwrt_lib/openwrt_constants.py rename to packages/antlion/controllers/openwrt_lib/openwrt_constants.py
diff --git a/src/antlion/controllers/openwrt_lib/wireless_config.py b/packages/antlion/controllers/openwrt_lib/wireless_config.py similarity index 70% rename from src/antlion/controllers/openwrt_lib/wireless_config.py rename to packages/antlion/controllers/openwrt_lib/wireless_config.py index 9cdb309..d97e197 100644 --- a/src/antlion/controllers/openwrt_lib/wireless_config.py +++ b/packages/antlion/controllers/openwrt_lib/wireless_config.py
@@ -1,6 +1,6 @@ """Class for Wireless config.""" -NET_IFACE = "lan" +from antlion.controllers.ap_lib.hostapd_security import OpenWRTEncryptionMode class WirelessConfig(object): @@ -24,19 +24,19 @@ def __init__( self, - name, - ssid, - security, - band, - iface=NET_IFACE, - password=None, - wep_key=None, - wep_key_num=1, - radius_server_ip=None, - radius_server_port=None, - radius_server_secret=None, - hidden=False, - ieee80211w=None, + name: str, + ssid: str, + security: OpenWRTEncryptionMode, + band: str, + iface: str = "lan", + password: str | None = None, + wep_key: list[str] | None = None, + wep_key_num: int = 1, + radius_server_ip: str | None = None, + radius_server_port: int | None = None, + radius_server_secret: str | None = None, + hidden: bool = False, + ieee80211w: int | None = None, ): self.name = name self.ssid = ssid
diff --git a/src/antlion/controllers/openwrt_lib/wireless_settings_applier.py b/packages/antlion/controllers/openwrt_lib/wireless_settings_applier.py similarity index 61% rename from src/antlion/controllers/openwrt_lib/wireless_settings_applier.py rename to packages/antlion/controllers/openwrt_lib/wireless_settings_applier.py index d899a30..da0d2d7 100644 --- a/src/antlion/controllers/openwrt_lib/wireless_settings_applier.py +++ b/packages/antlion/controllers/openwrt_lib/wireless_settings_applier.py
@@ -3,8 +3,11 @@ import time from antlion.controllers.ap_lib import hostapd_constants -from antlion.controllers.openwrt_lib.network_settings import SERVICE_DNSMASQ -from antlion.controllers.openwrt_lib.network_settings import ServiceManager +from antlion.controllers.openwrt_lib.network_settings import ( + SERVICE_DNSMASQ, + ServiceManager, +) +from antlion.controllers.openwrt_lib.wireless_config import WirelessConfig LEASE_FILE = "/tmp/dhcp.leases" OPEN_SECURITY = "none" @@ -48,7 +51,7 @@ """ self.ssh = ssh self.service_manager = ServiceManager(ssh) - self.wireless_configs = configs + self.wireless_configs: list[WirelessConfig] = configs self.channel_2g = channel_2g self.channel_5g = channel_5g self.radio_2g = radio_2g @@ -56,52 +59,38 @@ def apply_wireless_settings(self): """Configure wireless settings from a list of configs.""" - default_2g_iface = "default_" + self.radio_2g - default_5g_iface = "default_" + self.radio_5g + default_2g_iface = f"default_{self.radio_2g}" + default_5g_iface = f"default_{self.radio_5g}" # set channels for 2G and 5G bands - self.ssh.run( - "uci set wireless.%s.channel='%s'" % (self.radio_2g, self.channel_2g) - ) - self.ssh.run( - "uci set wireless.%s.channel='%s'" % (self.radio_5g, self.channel_5g) - ) + self.ssh.run(f"uci set wireless.{self.radio_2g}.channel='{self.channel_2g}'") + self.ssh.run(f"uci set wireless.{self.radio_5g}.channel='{self.channel_5g}'") if self.channel_5g == 165: - self.ssh.run("uci set wireless.%s.htmode='VHT20'" % self.radio_5g) + self.ssh.run(f"uci set wireless.{self.radio_5g}.htmode='VHT20'") elif self.channel_5g == 132 or self.channel_5g == 136: self.ssh.run("iw reg set ZA") - self.ssh.run("uci set wireless.%s.htmode='VHT40'" % self.radio_5g) + self.ssh.run(f"uci set wireless.{self.radio_5g}.htmode='VHT40'") if self.channel_2g == 13: self.ssh.run("iw reg set AU") # disable default OpenWrt SSID - self.ssh.run( - "uci set wireless.%s.disabled='%s'" % (default_2g_iface, DISABLE_RADIO) - ) - self.ssh.run( - "uci set wireless.%s.disabled='%s'" % (default_5g_iface, DISABLE_RADIO) - ) + self.ssh.run(f"uci set wireless.{default_2g_iface}.disabled='{DISABLE_RADIO}'") + self.ssh.run(f"uci set wireless.{default_5g_iface}.disabled='{DISABLE_RADIO}'") # Enable radios - self.ssh.run( - "uci set wireless.%s.disabled='%s'" % (self.radio_2g, ENABLE_RADIO) - ) - self.ssh.run( - "uci set wireless.%s.disabled='%s'" % (self.radio_5g, ENABLE_RADIO) - ) + self.ssh.run(f"uci set wireless.{self.radio_2g}.disabled='{ENABLE_RADIO}'") + self.ssh.run(f"uci set wireless.{self.radio_5g}.disabled='{ENABLE_RADIO}'") for config in self.wireless_configs: # configure open network if config.security == OPEN_SECURITY: if config.band == hostapd_constants.BAND_2G: self.ssh.run( - "uci set wireless.%s.ssid='%s'" - % (default_2g_iface, config.ssid) + f"uci set wireless.{default_2g_iface}.ssid='{config.ssid}'" ) self.ssh.run( - "uci set wireless.%s.disabled='%s'" - % (default_2g_iface, ENABLE_RADIO) + f"uci set wireless.{default_2g_iface}.disabled='{ENABLE_RADIO}'" ) if config.hidden: self.ssh.run( @@ -110,12 +99,10 @@ ) elif config.band == hostapd_constants.BAND_5G: self.ssh.run( - "uci set wireless.%s.ssid='%s'" - % (default_5g_iface, config.ssid) + f"uci set wireless.{default_5g_iface}.ssid='{config.ssid}'" ) self.ssh.run( - "uci set wireless.%s.disabled='%s'" - % (default_5g_iface, ENABLE_RADIO) + f"uci set wireless.{default_5g_iface}.disabled='{ENABLE_RADIO}'" ) if config.hidden: self.ssh.run( @@ -124,22 +111,16 @@ ) continue - self.ssh.run("uci set 
wireless.%s='wifi-iface'" % config.name) + self.ssh.run(f"uci set wireless.{config.name}='wifi-iface'") if config.band == hostapd_constants.BAND_2G: - self.ssh.run( - "uci set wireless.%s.device='%s'" % (config.name, self.radio_2g) - ) + self.ssh.run(f"uci set wireless.{config.name}.device='{self.radio_2g}'") else: - self.ssh.run( - "uci set wireless.%s.device='%s'" % (config.name, self.radio_5g) - ) + self.ssh.run(f"uci set wireless.{config.name}.device='{self.radio_5g}'") + self.ssh.run(f"uci set wireless.{config.name}.network='{config.iface}'") + self.ssh.run(f"uci set wireless.{config.name}.mode='ap'") + self.ssh.run(f"uci set wireless.{config.name}.ssid='{config.ssid}'") self.ssh.run( - "uci set wireless.%s.network='%s'" % (config.name, config.iface) - ) - self.ssh.run("uci set wireless.%s.mode='ap'" % config.name) - self.ssh.run("uci set wireless.%s.ssid='%s'" % (config.name, config.ssid)) - self.ssh.run( - "uci set wireless.%s.encryption='%s'" % (config.name, config.security) + f"uci set wireless.{config.name}.encryption='{config.security}'" ) if ( config.security == PSK_SECURITY @@ -147,16 +128,14 @@ or config.security == PSK1_SECURITY or config.security == SAEMIXED_SECURITY ): - self.ssh.run( - "uci set wireless.%s.key='%s'" % (config.name, config.password) - ) + self.ssh.run(f"uci set wireless.{config.name}.key='{config.password}'") elif config.security == WEP_SECURITY: self.ssh.run( "uci set wireless.%s.key%s='%s'" % (config.name, config.wep_key_num, config.wep_key) ) self.ssh.run( - "uci set wireless.%s.key='%s'" % (config.name, config.wep_key_num) + f"uci set wireless.{config.name}.key='{config.wep_key_num}'" ) elif config.security == ENT_SECURITY: self.ssh.run( @@ -173,16 +152,13 @@ ) if config.ieee80211w: self.ssh.run( - "uci set wireless.%s.ieee80211w='%s'" - % (config.name, config.ieee80211w) + f"uci set wireless.{config.name}.ieee80211w='{config.ieee80211w}'" ) if config.hidden: - self.ssh.run( - "uci set wireless.%s.hidden='%s'" % (config.name, ENABLE_HIDDEN) - ) + self.ssh.run(f"uci set wireless.{config.name}.hidden='{ENABLE_HIDDEN}'") self.ssh.run("uci commit wireless") - self.ssh.run("cp %s %s.tmp" % (LEASE_FILE, LEASE_FILE)) + self.ssh.run(f"cp {LEASE_FILE} {LEASE_FILE}.tmp") def cleanup_wireless_settings(self): """Reset wireless settings to default.""" @@ -191,6 +167,6 @@ self.ssh.run("wifi config") if self.channel_5g == 132: self.ssh.run("iw reg set US") - self.ssh.run("cp %s.tmp %s" % (LEASE_FILE, LEASE_FILE)) + self.ssh.run(f"cp {LEASE_FILE}.tmp {LEASE_FILE}") self.service_manager.restart(SERVICE_DNSMASQ) time.sleep(9)
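The applier keeps the same lifecycle after the f-string cleanup: apply_wireless_settings() pushes the UCI wireless config and cleanup_wireless_settings() restores the defaults. A usage sketch; the class name and positional argument order are inferred from the __init__ assignments above, and the radio identifiers are placeholders:

from antlion.controllers.openwrt_lib.wireless_settings_applier import (
    WirelessSettingsApplier,  # assumed class name in this module
)

applier = WirelessSettingsApplier(
    ssh,        # SSH connection to the OpenWrt device
    [config],   # list[WirelessConfig] built as in wireless_config.py
    6,          # channel_2g
    36,         # channel_5g
    "radio1",   # radio_2g (placeholder)
    "radio0",   # radio_5g (placeholder)
)
applier.apply_wireless_settings()
# ... run the test against the new SSIDs ...
applier.cleanup_wireless_settings()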
diff --git a/src/antlion/controllers/packet_capture.py b/packages/antlion/controllers/packet_capture.py similarity index 66% rename from src/antlion/controllers/packet_capture.py rename to packages/antlion/controllers/packet_capture.py index ce3d8fd..5b753df 100755 --- a/src/antlion/controllers/packet_capture.py +++ b/packages/antlion/controllers/packet_capture.py
@@ -14,23 +14,27 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + +import io +import logging import os import threading import time +from dataclasses import dataclass -from antlion import logger -from antlion.controllers.ap_lib.hostapd_constants import FREQUENCY_MAP -from antlion.controllers.ap_lib.hostapd_constants import CENTER_CHANNEL_MAP -from antlion.controllers.ap_lib.hostapd_constants import VHT_CHANNEL -from antlion.controllers.utils_lib.ssh import connection -from antlion.controllers.utils_lib.ssh import formatter -from antlion.controllers.utils_lib.ssh import settings +from mobly import asserts, logger + +from antlion.controllers.ap_lib.hostapd_constants import ( + CENTER_CHANNEL_MAP, + FREQUENCY_MAP, + VHT_CHANNEL, +) +from antlion.controllers.utils_lib.ssh import connection, formatter, settings from antlion.libs.proc.process import Process +from antlion.types import ControllerConfig, Json -from mobly import asserts - -MOBLY_CONTROLLER_CONFIG_NAME = "PacketCapture" -ACTS_CONTROLLER_REFERENCE_NAME = "packet_capture" +MOBLY_CONTROLLER_CONFIG_NAME: str = "PacketCapture" BSS = "BSS" BSSID = "BSSID" FREQ = "freq" @@ -46,33 +50,31 @@ SSID = "SSID" -def create(configs): +def create(configs: list[ControllerConfig]) -> list[PacketCapture]: return [PacketCapture(c) for c in configs] -def destroy(pcaps): - for pcap in pcaps: +def destroy(objects: list[PacketCapture]) -> None: + for pcap in objects: pcap.close() -def get_info(pcaps): - return [pcap.ssh_settings.hostname for pcap in pcaps] +def get_info(objects: list[PacketCapture]) -> list[Json]: + return [pcap.ssh_settings.hostname for pcap in objects] -class PcapProperties(object): - """Class to maintain packet capture properties after starting tcpdump. +@dataclass(frozen=True) +class PcapProperties: + """Packet capture properties.""" - Attributes: - proc: Process object of tcpdump - pcap_fname: File name of the tcpdump output file - pcap_file: File object for the tcpdump output file - """ + proc: Process + """Process object of tcpdump.""" - def __init__(self, proc, pcap_fname, pcap_file): - """Initialize object.""" - self.proc = proc - self.pcap_fname = pcap_fname - self.pcap_file = pcap_file + pcap_fname: str + """File name of the tcpdump output file.""" + + pcap_file: io.BufferedRandom + """File object for the tcpdump output file.""" class PacketCaptureError(Exception): @@ -91,7 +93,7 @@ band. """ - def __init__(self, configs): + def __init__(self, configs: ControllerConfig) -> None: """Initialize objects. 
Args: @@ -99,48 +101,50 @@ """ self.ssh_settings = settings.from_config(configs["ssh_config"]) self.ssh = connection.SshConnection(self.ssh_settings) - self.log = logger.create_logger( - lambda msg: "[%s|%s] %s" - % (MOBLY_CONTROLLER_CONFIG_NAME, self.ssh_settings.hostname, msg) + self.log = logger.PrefixLoggerAdapter( + logging.getLogger(), + { + logger.PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX: f"[PacketCapture|{self.ssh_settings.hostname}]", + }, ) self._create_interface(MON_2G, "monitor") self._create_interface(MON_5G, "monitor") self.managed_mode = True result = self.ssh.run("ifconfig -a", ignore_status=True) - if result.stderr or SCAN_IFACE not in result.stdout: + if result.stderr or SCAN_IFACE not in result.stdout.decode("utf-8"): self.managed_mode = False if self.managed_mode: self._create_interface(SCAN_IFACE, "managed") - self.pcap_properties = dict() + self.pcap_properties: dict[str, PcapProperties] = {} self._pcap_stop_lock = threading.Lock() - def _create_interface(self, iface, mode): + def _create_interface(self, iface: str, mode: str) -> None: """Create interface of monitor/managed mode. Create mon0/mon1 for 2G/5G monitor mode and wlan2 for managed mode. """ if mode == "monitor": - self.ssh.run("ifconfig wlan%s down" % iface[-1], ignore_status=True) - self.ssh.run("iw dev %s del" % iface, ignore_status=True) + self.ssh.run(f"ifconfig wlan{iface[-1]} down", ignore_status=True) + self.ssh.run(f"iw dev {iface} del", ignore_status=True) self.ssh.run( - "iw phy%s interface add %s type %s" % (iface[-1], iface, mode), + f"iw phy{iface[-1]} interface add {iface} type {mode}", ignore_status=True, ) - self.ssh.run("ip link set %s up" % iface, ignore_status=True) - result = self.ssh.run("iw dev %s info" % iface, ignore_status=True) - if result.stderr or iface not in result.stdout: - raise PacketCaptureError("Failed to configure interface %s" % iface) + self.ssh.run(f"ip link set {iface} up", ignore_status=True) + result = self.ssh.run(f"iw dev {iface} info", ignore_status=True) + if result.stderr or iface not in result.stdout.decode("utf-8"): + raise PacketCaptureError(f"Failed to configure interface {iface}") - def _cleanup_interface(self, iface): + def _cleanup_interface(self, iface: str) -> None: """Clean up monitor mode interfaces.""" - self.ssh.run("iw dev %s del" % iface, ignore_status=True) - result = self.ssh.run("iw dev %s info" % iface, ignore_status=True) - if not result.stderr or "No such device" not in result.stderr: - raise PacketCaptureError("Failed to cleanup monitor mode for %s" % iface) + self.ssh.run(f"iw dev {iface} del", ignore_status=True) + result = self.ssh.run(f"iw dev {iface} info", ignore_status=True) + if not result.stderr or "No such device" not in result.stderr.decode("utf-8"): + raise PacketCaptureError(f"Failed to cleanup monitor mode for {iface}") - def _parse_scan_results(self, scan_result): + def _parse_scan_results(self, scan_result: str) -> list[dict[str, str | int]]: """Parses the scan dump output and returns list of dictionaries. Args: @@ -154,8 +158,8 @@ c.) FREQUENCY - WiFi band the network is on. d.) BSSID - BSSID of the network. """ - scan_networks = [] - network = {} + scan_networks: list[dict[str, str | int]] = [] + network: dict[str, str | int] = {} for line in scan_result.splitlines(): if SEP not in line: continue @@ -173,7 +177,7 @@ network = {} return scan_networks - def get_wifi_scan_results(self): + def get_wifi_scan_results(self) -> list[dict[str, str | int]]: """Starts a wifi scan on wlan2 interface. 
Returns: @@ -181,14 +185,14 @@ """ if not self.managed_mode: raise PacketCaptureError("Managed mode not setup") - result = self.ssh.run("iw dev %s scan" % SCAN_IFACE) + result = self.ssh.run(f"iw dev {SCAN_IFACE} scan") if result.stderr: raise PacketCaptureError("Failed to get scan dump") if not result.stdout: return [] - return self._parse_scan_results(result.stdout) + return self._parse_scan_results(result.stdout.decode("utf-8")) - def start_scan_and_find_network(self, ssid): + def start_scan_and_find_network(self, ssid: str) -> bool: """Start a wifi scan on wlan2 interface and find network. Args: @@ -206,7 +210,9 @@ time.sleep(3) # sleep before next scan return False - def configure_monitor_mode(self, band, channel, bandwidth=20): + def configure_monitor_mode( + self, band: str, channel: int, bandwidth: int = 20 + ) -> bool: """Configure monitor mode. Args: @@ -226,9 +232,7 @@ iface = BAND_IFACE[band] if bandwidth == 20: - self.ssh.run( - "iw dev %s set channel %s" % (iface, channel), ignore_status=True - ) + self.ssh.run(f"iw dev {iface} set channel {channel}", ignore_status=True) else: center_freq = None for i, j in CENTER_CHANNEL_MAP[VHT_CHANNEL[bandwidth]]["channels"]: @@ -242,13 +246,15 @@ ignore_status=True, ) - result = self.ssh.run("iw dev %s info" % iface, ignore_status=True) - if result.stderr or "channel %s" % channel not in result.stdout: - self.log.error("Failed to configure monitor mode for %s" % band) + result = self.ssh.run(f"iw dev {iface} info", ignore_status=True) + if result.stderr or f"channel {channel}" not in result.stdout.decode("utf-8"): + self.log.error(f"Failed to configure monitor mode for {band}") return False return True - def start_packet_capture(self, band, log_path, pcap_fname): + def start_packet_capture( + self, band: str, log_path: str, pcap_fname: str + ) -> Process | None: """Start packet capture for band. band = 2G starts tcpdump on 'mon0' interface. @@ -267,22 +273,28 @@ self.log.error("Invalid band or packet capture already running") return None - pcap_name = "%s_%s.pcap" % (pcap_fname, band) + pcap_name = f"{pcap_fname}_{band}.pcap" pcap_fname = os.path.join(log_path, pcap_name) pcap_file = open(pcap_fname, "w+b") - tcpdump_cmd = "tcpdump -i %s -w - -U 2>/dev/null" % (BAND_IFACE[band]) + tcpdump_cmd = f"tcpdump -i {BAND_IFACE[band]} -w - -U 2>/dev/null" cmd = formatter.SshFormatter().format_command( - tcpdump_cmd, None, self.ssh_settings, extra_flags={"-q": None} + tcpdump_cmd, self.ssh_settings, extra_flags={"-q": None} ) pcap_proc = Process(cmd) - pcap_proc.set_on_output_callback(lambda msg: pcap_file.write(msg), binary=True) + + def write_to_pcap(data: bytes | str) -> None: + if isinstance(data, str): + data = data.encode("utf-8") + pcap_file.write(data) + + pcap_proc.set_on_output_callback(write_to_pcap, binary=True) pcap_proc.start() self.pcap_properties[band] = PcapProperties(pcap_proc, pcap_fname, pcap_file) return pcap_proc - def stop_packet_capture(self, proc): + def stop_packet_capture(self, proc: Process) -> None: """Stop the packet capture. Args: @@ -300,7 +312,7 @@ self.pcap_properties[key].pcap_file.close() del self.pcap_properties[key] - def close(self): + def close(self) -> None: """Cleanup. Cleans up all the monitor mode interfaces and closes ssh connections.
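PacketCapture keeps its public surface through this typing pass: create() builds controllers from ssh_config entries, configure_monitor_mode() tunes the mon0/mon1 interfaces, and start/stop_packet_capture() stream tcpdump output into a local pcap file. A sketch of that flow; the "2G" band label and the ssh_config fields are assumptions based on the constants referenced above:

from antlion.controllers import packet_capture

pcaps = packet_capture.create(
    [{"ssh_config": {"host": "192.168.1.50", "user": "root"}}]  # hypothetical config
)
pcap = pcaps[0]
try:
    if pcap.configure_monitor_mode(band="2G", channel=6):
        proc = pcap.start_packet_capture("2G", log_path="/tmp/logs", pcap_fname="assoc_test")
        # ... drive the DUT while frames are captured on mon0 ...
        if proc:
            pcap.stop_packet_capture(proc)
finally:
    packet_capture.destroy(pcaps)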
diff --git a/packages/antlion/controllers/pdu.py b/packages/antlion/controllers/pdu.py new file mode 100644 index 0000000..503d964 --- /dev/null +++ b/packages/antlion/controllers/pdu.py
@@ -0,0 +1,293 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import enum +import logging +import time +from enum import IntEnum, unique +from typing import Protocol + +from antlion.types import ControllerConfig, Json +from antlion.validation import MapValidator + +MOBLY_CONTROLLER_CONFIG_NAME: str = "PduDevice" + +# Allow time for capacitors to discharge. +DEFAULT_REBOOT_DELAY_SEC = 5.0 + + +class PduType(enum.StrEnum): + NP02B = "synaccess.np02b" + WEBPOWERSWITCH = "digital_loggers.webpowerswitch" + + +class PduError(Exception): + """An exception for use within PduDevice implementations""" + + +def create(configs: list[ControllerConfig]) -> list[PduDevice]: + """Creates a PduDevice for each config in configs. + + Args: + configs: List of configs from PduDevice field. + Fields: + device: a string "<brand>.<model>" that corresponds to module + in pdu_lib/ + host: a string of the device ip address + username (optional): a string of the username for device sign-in + password (optional): a string of the password for device sign-in + Return: + A list of PduDevice objects. + """ + pdus: list[PduDevice] = [] + for config in configs: + c = MapValidator(config) + device = c.get(str, "device") + pduType = PduType(device) + + host = c.get(str, "host") + username = c.get(str, "username", None) + password = c.get(str, "password", None) + + match pduType: + case PduType.NP02B: + from antlion.controllers.pdu_lib.synaccess.np02b import ( + PduDevice as NP02B, + ) + + pdus.append(NP02B(host, username, password)) + case PduType.WEBPOWERSWITCH: + from antlion.controllers.pdu_lib.digital_loggers.webpowerswitch import ( + PduDevice as WebPowerSwitch, + ) + + pdus.append(WebPowerSwitch(host, username, password)) + return pdus + + +def destroy(objects: list[PduDevice]) -> None: + """Ensure any connections to devices are closed. + + Args: + pdu_list: A list of PduDevice objects. + """ + for pdu in objects: + pdu.close() + + +def get_info(objects: list[PduDevice]) -> list[Json]: + """Retrieves info from a list of PduDevice objects. + + Args: + pdu_list: A list of PduDevice objects. + Return: + A list containing a dictionary for each PduDevice, with keys: + 'host': a string of the device ip address + 'username': a string of the username + 'password': a string of the password + """ + info: list[Json] = [] + for pdu in objects: + info.append( + {"host": pdu.host, "username": pdu.username, "password": pdu.password} + ) + return info + + +def get_pdu_port_for_device( + device_pdu_config: dict[str, Json], pdus: list[PduDevice] +) -> tuple[PduDevice, int]: + """Retrieves the pdu object and port of that PDU powering a given device. + This is especially necessary when there are multilpe devices on a single PDU + or multiple PDUs registered. + + Args: + device_pdu_config: a dict, representing the config of the device. + pdus: a list of registered PduDevice objects. 
+ + Returns: + A tuple: (PduObject for the device, string port number on that PDU). + + Raises: + ValueError, if there is no PDU matching the given host in the config. + + Example ACTS config: + ... + "testbed": [ + ... + "FuchsiaDevice": [ + { + "ip": "<device_ip>", + "ssh_config": "/path/to/sshconfig", + "PduDevice": { + "host": "192.168.42.185", + "port": 2 + } + } + ], + "AccessPoint": [ + { + "ssh_config": { + ... + }, + "PduDevice": { + "host": "192.168.42.185", + "port" 1 + } + } + ], + "PduDevice": [ + { + "device": "synaccess.np02b", + "host": "192.168.42.185" + } + ] + ], + ... + """ + config = MapValidator(device_pdu_config) + pdu_ip = config.get(str, "host") + port = config.get(int, "port") + for pdu in pdus: + if pdu.host == pdu_ip: + return pdu, port + raise ValueError(f"No PduDevice with host: {pdu_ip}") + + +class PDU(Protocol): + """Control power delivery to a device with a PDU.""" + + def port(self, index: int) -> Port: + """Access a single port. + + Args: + index: Index of the port, likely the number identifier above the outlet. + + Returns: + Controller for the specified port. + """ + ... + + def __len__(self) -> int: + """Count the number of ports. + + Returns: + Number of ports on this PDU. + """ + ... + + +class Port(Protocol): + """Controlling the power delivery to a single port of a PDU.""" + + def status(self) -> PowerState: + """Return the power state for this port. + + Returns: + Power state + """ + ... + + def set(self, state: PowerState) -> None: + """Set the power state for this port. + + Args: + state: Desired power state + """ + ... + + def reboot(self, delay_sec: float = DEFAULT_REBOOT_DELAY_SEC) -> None: + """Set the power state OFF then ON after a delay. + + Args: + delay_sec: Length to wait before turning back ON. This is important to allow + the device's capacitors to discharge. + """ + self.set(PowerState.OFF) + time.sleep(delay_sec) + self.set(PowerState.ON) + + +@unique +class PowerState(IntEnum): + OFF = 0 + ON = 1 + + +class PduDevice(object): + """An object that defines the basic Pdu functionality and abstracts + the actual hardware. + + This is a pure abstract class. Implementations should be of the same + class name (eg. class PduDevice(pdu.PduDevice)) and exist in + pdu_lib/<brand>/<device_name>.py. PduDevice objects should not be + instantiated by users directly. + + TODO(http://b/318877544): Replace PduDevice with PDU + """ + + def __init__(self, host: str, username: str | None, password: str | None) -> None: + if type(self) is PduDevice: + raise NotImplementedError("Base class: cannot be instantiated directly") + self.host = host + self.username = username + self.password = password + self.log = logging.getLogger() + + def on_all(self) -> None: + """Turns on all outlets on the device.""" + raise NotImplementedError("Base class: cannot be called directly") + + def off_all(self) -> None: + """Turns off all outlets on the device.""" + raise NotImplementedError("Base class: cannot be called directly") + + def on(self, outlet: int) -> None: + """Turns on specific outlet on the device. + Args: + outlet: index of the outlet to turn on. + """ + raise NotImplementedError("Base class: cannot be called directly") + + def off(self, outlet: int) -> None: + """Turns off specific outlet on the device. + Args: + outlet: index of the outlet to turn off. + """ + raise NotImplementedError("Base class: cannot be called directly") + + def reboot(self, outlet: int) -> None: + """Toggles a specific outlet on the device to off, then to on. 
+ Args: + outlet: index of the outlet to reboot. + """ + raise NotImplementedError("Base class: cannot be called directly") + + def status(self) -> dict[str, bool]: + """Retrieves the status of the outlets on the device. + + Return: + A dictionary matching outlet string to: + True: if outlet is On + False: if outlet is Off + """ + raise NotImplementedError("Base class: cannot be called directly") + + def close(self) -> None: + """Closes connection to the device.""" + raise NotImplementedError("Base class: cannot be called directly")
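The new module keeps the legacy PduDevice hierarchy alongside the PDU/Port protocols, so existing call sites keep working while implementations migrate. A sketch of the factory-driven path; the host and port values are placeholders mirroring the example ACTS config in the docstring above:

from antlion.controllers import pdu

pdus = pdu.create([{"device": "synaccess.np02b", "host": "192.168.42.185"}])
device_pdu_config = {"host": "192.168.42.185", "port": 2}

pdu_device, port = pdu.get_pdu_port_for_device(device_pdu_config, pdus)
pdu_device.reboot(port)  # power-cycle the outlet feeding the device under test
pdu.destroy(pdus)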
diff --git a/src/antlion/controllers/pdu_lib/__init__.py b/packages/antlion/controllers/pdu_lib/__init__.py similarity index 100% rename from src/antlion/controllers/pdu_lib/__init__.py rename to packages/antlion/controllers/pdu_lib/__init__.py
diff --git a/src/antlion/controllers/pdu_lib/digital_loggers/__init__.py b/packages/antlion/controllers/pdu_lib/digital_loggers/__init__.py similarity index 100% rename from src/antlion/controllers/pdu_lib/digital_loggers/__init__.py rename to packages/antlion/controllers/pdu_lib/digital_loggers/__init__.py
diff --git a/src/antlion/controllers/pdu_lib/digital_loggers/webpowerswitch.py b/packages/antlion/controllers/pdu_lib/digital_loggers/webpowerswitch.py similarity index 94% rename from src/antlion/controllers/pdu_lib/digital_loggers/webpowerswitch.py rename to packages/antlion/controllers/pdu_lib/digital_loggers/webpowerswitch.py index 1154f95..660e965 100644 --- a/src/antlion/controllers/pdu_lib/digital_loggers/webpowerswitch.py +++ b/packages/antlion/controllers/pdu_lib/digital_loggers/webpowerswitch.py
@@ -14,7 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from antlion import signals +from mobly import signals + from antlion.controllers import pdu # Create an optional dependency for dlipower since it has a transitive @@ -43,7 +44,7 @@ - Ethernet Power Controller III """ - def __init__(self, host, username, password): + def __init__(self, host: str, username: str | None, password: str | None) -> None: """ Note: This may require allowing plaintext password sign in on the power switch, which can be configure in the device's control panel. @@ -67,7 +68,7 @@ "userid, or password?" ) else: - self.log.info("Connected to WebPowerSwitch (%s)." % host) + self.log.info(f"Connected to WebPowerSwitch ({host}).") def on_all(self): """Turn on power to all outlets.""" @@ -144,9 +145,7 @@ if actual_state == expected_state: return else: - self.log.debug( - "Outlet %s not yet in state %s" % (outlet, expected_state) - ) + self.log.debug(f"Outlet {outlet} not yet in state {expected_state}") raise pdu.PduError( "Outlet %s on WebPowerSwitch (%s) failed to reach expected state. \n" "Expected State: %s\n"
diff --git a/src/antlion/controllers/pdu_lib/synaccess/__init__.py b/packages/antlion/controllers/pdu_lib/synaccess/__init__.py similarity index 100% rename from src/antlion/controllers/pdu_lib/synaccess/__init__.py rename to packages/antlion/controllers/pdu_lib/synaccess/__init__.py
diff --git a/packages/antlion/controllers/pdu_lib/synaccess/np02b.py b/packages/antlion/controllers/pdu_lib/synaccess/np02b.py new file mode 100644 index 0000000..d977a2a --- /dev/null +++ b/packages/antlion/controllers/pdu_lib/synaccess/np02b.py
@@ -0,0 +1,268 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +#