Skip to content

Commit

Permalink
Merge branch 'opensearch-project:main' into log_error_console
Browse files Browse the repository at this point in the history
  • Loading branch information
cgchinmay authored Nov 8, 2023
2 parents 8c3c56e + 3adacc0 commit 9f401f2
Show file tree
Hide file tree
Showing 6 changed files with 24 additions and 15 deletions.
6 changes: 5 additions & 1 deletion DEVELOPER_GUIDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ This document will walk you through on what's needed to start contributing code
- [Miscellaneous](#miscellaneous)
- [git-secrets](#git-secrets)
- [Adding new major and minor Python versions to OpenSearch Benchmark](#adding-new-major-and-minor-python-versions-to-opensearch-benchmark)
- [Debugging OpenSearch Benchmark in Developer Mode](#debugging-opensearch-benchmark-in-developer-mode)
- [Debugging unittests in Visual Studio Code](#debugging-unittests-in-visual-studio-code)

## Installation
Expand Down Expand Up @@ -215,6 +216,9 @@ make install
### Adding New Major and Minor Python Versions to OpenSearch-Benchmark
To streamline the process, please refer to [this guide](https://github.com/opensearch-project/opensearch-benchmark/blob/main/PYTHON_SUPPORT_GUIDE.md)
### Debugging OpenSearch Benchmark in Developer Mode
Many users find that the simplest way to debug OpenSearch Benchmark is by using developer mode. Users can activate developer mode by running `python3 -m pip install -e .` within the cloned OpenSearch Benchmark repository. Any changes made and saved will be reflected when OpenSearch Benchmark is run. Users can add loggers or print statements and see the changes reflected in subsequent runs.
### Debugging Unittests in Visual Studio Code
To run and debug unittests in Visual Studio Code, add the following configuration to the Python Debugger `launch.json` file. See [the official Visual Studio Code documentation](https://code.visualstudio.com/docs/editor/debugging) for more information on setting up and accessing the `launch.json` file.
```
Expand All @@ -226,4 +230,4 @@ To run and debug unittests in Visual Studio Code, add the following configuratio
"args": ["-k ${file}"]
}
```
With this, users can easily run and debug unittests within Visual Studio Code without invoking pytest manually on the command line.
With this, users can easily run and debug unittests within Visual Studio Code without invoking pytest manually on the command line.
6 changes: 3 additions & 3 deletions osbenchmark/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -470,7 +470,7 @@ def add_workload_source(subparser):
"(default: localhost:9200).",
default="") # actually the default is pipeline specific and it is set later
test_execution_parser.add_argument(
"--load-worker-coordinator-hosts",
"--worker-ips",
help="Define a comma-separated list of hosts which should generate load (default: localhost).",
default="localhost")
test_execution_parser.add_argument(
Expand Down Expand Up @@ -859,8 +859,8 @@ def dispatch_sub_command(arg_parser, args, cfg):
cfg.add(
config.Scope.applicationOverride,
"worker_coordinator",
"load_worker_coordinator_hosts",
opts.csv_to_list(args.load_worker_coordinator_hosts))
"worker_ips",
opts.csv_to_list(args.worker_ips))
cfg.add(config.Scope.applicationOverride, "workload", "test.mode.enabled", args.test_mode)
configure_workload_params(arg_parser, args, cfg)
configure_connection_params(arg_parser, args, cfg)
Expand Down
9 changes: 5 additions & 4 deletions osbenchmark/utils/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,11 +133,12 @@ def run_subprocess_with_logging(command_line, header=None, level=logging.INFO, s

def is_benchmark_process(p):
    """Determine whether *p* is an OpenSearch Benchmark process.

    :param p: a psutil-like process object exposing ``name()`` and ``cmdline()``.
    :return: True if the process is recognized as OpenSearch Benchmark, either
        by its process name or by being a Python interpreter launched with an
        ``opensearch-benchmark`` script as its first argument.
    """
    cmdline = p.cmdline()
    # On Linux, /proc/PID/status truncates the command name to 15 characters,
    # so "opensearch-benchmark" shows up as "opensearch-benc".
    return p.name() == "opensearch-benchmark" or \
        p.name() == "opensearch-benc" or \
        (len(cmdline) > 1 and
         os.path.basename(cmdline[0].lower()).startswith("python") and
         os.path.basename(cmdline[1]) == "opensearch-benchmark")


def find_all_other_benchmark_processes():
Expand Down
10 changes: 5 additions & 5 deletions osbenchmark/worker_coordinator/worker_coordinator.py
Original file line number Diff line number Diff line change
Expand Up @@ -528,7 +528,7 @@ def __init__(self, target, config, os_client_factory_class=client.OsClientFactor
self.workload = None
self.test_procedure = None
self.metrics_store = None
self.load_worker_coordinator_hosts = []
self.worker_ips = []
self.workers = []
# which client ids are assigned to which workers?
self.clients_per_worker = {}
Expand Down Expand Up @@ -636,7 +636,7 @@ def prepare_benchmark(self, t):
# are not useful and attempts to connect to a non-existing cluster just lead to exception traces in logs.
self.prepare_telemetry(os_clients, enable=not uses_static_responses)

for host in self.config.opts("worker_coordinator", "load_worker_coordinator_hosts"):
for host in self.config.opts("worker_coordinator", "worker_ips"):
host_config = {
# for simplicity we assume that all benchmark machines have the same specs
"cores": num_cores(self.config)
Expand All @@ -646,9 +646,9 @@ def prepare_benchmark(self, t):
else:
host_config["host"] = host

self.load_worker_coordinator_hosts.append(host_config)
self.worker_ips.append(host_config)

self.target.prepare_workload([h["host"] for h in self.load_worker_coordinator_hosts], self.config, self.workload)
self.target.prepare_workload([h["host"] for h in self.worker_ips], self.config, self.workload)

def start_benchmark(self):
self.logger.info("Benchmark is about to start.")
Expand All @@ -669,7 +669,7 @@ def start_benchmark(self):
if allocator.clients < 128:
self.logger.info("Allocation matrix:\n%s", "\n".join([str(a) for a in self.allocations]))

worker_assignments = calculate_worker_assignments(self.load_worker_coordinator_hosts, allocator.clients)
worker_assignments = calculate_worker_assignments(self.worker_ips, allocator.clients)
worker_id = 0
for assignment in worker_assignments:
host = assignment["host"]
Expand Down
4 changes: 4 additions & 0 deletions tests/utils/process_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,8 @@ def test_kills_only_benchmark_processes(self, process_iter):
random_python = ProcessTests.Process(103, "python3", ["/some/django/app"])
other_process = ProcessTests.Process(104, "init", ["/usr/sbin/init"])
benchmark_process_p = ProcessTests.Process(105, "python3", ["/usr/bin/python3", "~/.local/bin/opensearch-benchmark"])
# On Linux, the process name is truncated to 15 characters.
benchmark_process_l = ProcessTests.Process(106, "opensearch-benc", ["/usr/bin/python3", "~/.local/bin/osbenchmark"])
benchmark_process_e = ProcessTests.Process(107, "opensearch-benchmark", ["/usr/bin/python3", "~/.local/bin/opensearch-benchmark"])
benchmark_process_mac = ProcessTests.Process(108, "Python", ["/Python.app/Contents/MacOS/Python",
"~/.local/bin/opensearch-benchmark"])
Expand All @@ -139,6 +141,7 @@ def test_kills_only_benchmark_processes(self, process_iter):
random_python,
other_process,
benchmark_process_p,
benchmark_process_l,
benchmark_process_e,
benchmark_process_mac,
own_benchmark_process,
Expand All @@ -153,6 +156,7 @@ def test_kills_only_benchmark_processes(self, process_iter):
self.assertFalse(random_python.killed)
self.assertFalse(other_process.killed)
self.assertTrue(benchmark_process_p.killed)
self.assertTrue(benchmark_process_l.killed)
self.assertTrue(benchmark_process_e.killed)
self.assertTrue(benchmark_process_mac.killed)
self.assertFalse(own_benchmark_process.killed)
Expand Down
4 changes: 2 additions & 2 deletions tests/worker_coordinator/worker_coordinator_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ def setUp(self):
self.cfg.add(config.Scope.application, "client", "hosts",
WorkerCoordinatorTests.Holder(all_hosts={"default": ["localhost:9200"]}))
self.cfg.add(config.Scope.application, "client", "options", WorkerCoordinatorTests.Holder(all_client_options={"default": {}}))
self.cfg.add(config.Scope.application, "worker_coordinator", "load_worker_coordinator_hosts", ["localhost"])
self.cfg.add(config.Scope.application, "worker_coordinator", "worker_ips", ["localhost"])
self.cfg.add(config.Scope.application, "results_publishing", "datastore.type", "in-memory")

default_test_procedure = workload.TestProcedure("default", default=True, schedule=[
Expand All @@ -135,7 +135,7 @@ def create_test_worker_coordinator_target(self):
@mock.patch("osbenchmark.utils.net.resolve")
def test_start_benchmark_and_prepare_workload(self, resolve):
# override load worker_coordinator host
self.cfg.add(config.Scope.applicationOverride, "worker_coordinator", "load_worker_coordinator_hosts", ["10.5.5.1", "10.5.5.2"])
self.cfg.add(config.Scope.applicationOverride, "worker_coordinator", "worker_ips", ["10.5.5.1", "10.5.5.2"])
resolve.side_effect = ["10.5.5.1", "10.5.5.2"]

target = self.create_test_worker_coordinator_target()
Expand Down

0 comments on commit 9f401f2

Please sign in to comment.