
Commit

Merge branch 'main' into updated-readme
MsRandom authored Dec 10, 2024
2 parents 80fe9a6 + cea9d9d commit 3b5adeb
Showing 9 changed files with 3,009 additions and 26 deletions.
2 changes: 1 addition & 1 deletion base/testing/vram_monitor.py
@@ -2,7 +2,7 @@

from base.contest import Contest

-SAMPLE_RATE_MS = 10
+SAMPLE_RATE_MS = 100


class VRamMonitor:
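The only functional change here is the sampling interval: 10 ms becomes 100 ms. The constant presumably drives VRamMonitor's polling loop, so VRAM is sampled ten times less often, trading peak-detection resolution for lower monitoring overhead. A rough sketch of such a polling loop, with hypothetical helper names since the class body is not shown in this diff:

import time
from threading import Event

SAMPLE_RATE_MS = 100

def track_peak_vram(read_vram_bytes, stop: Event) -> int:
    # Hypothetical loop: sample VRAM usage every SAMPLE_RATE_MS milliseconds
    # until the caller sets the stop event, and keep the peak reading.
    peak = 0
    while not stop.is_set():
        peak = max(peak, read_vram_bytes())
        time.sleep(SAMPLE_RATE_MS / 1000)
    return peak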
25 changes: 25 additions & 0 deletions tests/pyproject.toml
@@ -0,0 +1,25 @@
[build-system]
requires = ["setuptools >= 75.0"]
build-backend = "setuptools.build_meta"

[project]
name = "edge-maxxing-tests"
description = "Validator unit tests"
requires-python = ">=3.10,<3.13"
version = "1.0.0"
dependencies = [
"edge-maxxing-validator>=5.5.8",
]

[tool.uv.sources]
edge-maxxing-validator = { path = "../validator", editable = true }

[dependency-groups]
dev = [
"pytest>=8.3.4",
]

[tool.setuptools]
packages = [
"tests"
]
325 changes: 325 additions & 0 deletions tests/tests/test_winner_selection.py

Large diffs are not rendered by default.
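The test module itself is not rendered in this view. Purely as an illustration of the kind of check such a suite could run against get_contestant_ranks from validator/weight_setting/winner_selection.py, with made-up scores and an assumed import path (not the actual file contents):

from weight_setting.winner_selection import get_contestant_ranks  # import path assumed

def test_close_scores_share_a_rank():
    # Illustrative values: three tightly clustered leaders and one clear laggard.
    ranks = get_contestant_ranks({"a": 0.92, "b": 0.91, "c": 0.90, "d": 0.70})

    assert ranks["a"] == ranks["b"] == ranks["c"]
    assert ranks["d"] != ranks["a"]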

2,628 changes: 2,628 additions & 0 deletions tests/uv.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion validator/pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta"
name = "edge-maxxing-validator"
description = "The validator which checks models and checkpoints provided by miners"
requires-python = ">=3.10,<3.13"
version = "5.6.1"
version = "5.6.4"
dependencies = [
"edge-maxxing-base==1.0.0",
"opentelemetry-api>=1.28.2",
2 changes: 1 addition & 1 deletion validator/submission_tester/api.py
@@ -148,5 +148,5 @@ def initialize(
    })

    benchmarker: Benchmarker = request.state.benchmarker
-    benchmarker.shutdown()
    benchmarker.reset()
+    benchmarker.shutdown()
2 changes: 1 addition & 1 deletion validator/weight_setting/contest_state.py
@@ -31,7 +31,7 @@ def start_new_contest(self, benchmarks_version: int, submissions: Submissions):

        if self.benchmarking_state == BenchmarkState.FINISHED:
            logger.info("Updating benchmarks for weight setting")
-            self.last_benchmarks = self.benchmarks
+            self.last_benchmarks = dict(self.benchmarks)

        self.benchmarks.clear()
        self.invalid_submissions.clear()
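The dict(...) copy matters because self.benchmarks is cleared two lines later: without it, last_benchmarks would alias the same dictionary object and be emptied by the clear() as well. A minimal illustration of the aliasing pitfall, with illustrative values:

benchmarks = {"hotkey-1": 0.91, "hotkey-2": 0.87}

alias = benchmarks           # old behaviour: both names refer to the same dict
snapshot = dict(benchmarks)  # new behaviour: an independent shallow copy

benchmarks.clear()

print(alias)     # {} -- the saved benchmarks were wiped along with the original
print(snapshot)  # {'hotkey-1': 0.91, 'hotkey-2': 0.87} -- the copy survives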
31 changes: 18 additions & 13 deletions validator/weight_setting/validator.py
@@ -12,7 +12,7 @@
from requests.exceptions import HTTPError, ConnectionError
from substrateinterface import SubstrateInterface, Keypair

-from base.checkpoint import Uid
+from base.checkpoint import Uid, Submissions
from base.config import get_config
from base.contest import BenchmarkState
from base.inputs_api import get_inputs_state
@@ -114,6 +114,20 @@ def __init__(self):

        self.run()

+    def initialize_apis(self, untested_submissions: Submissions):
+        for api in self.benchmarking_apis:
+            api.initialize(
+                uid=self.uid,
+                signature=self.signature,
+                netuid=self.metagraph.netuid,
+                substrate_url=self.substrate.url,
+            )
+        send_submissions_to_api(
+            version=self.validator_version,
+            all_apis=self.benchmarking_apis,
+            submissions=untested_submissions,
+        )
+
    @tracer.start_as_current_span("initialize_contest")
    def initialize_contest(self, benchmarks_version: int):
        self.metagraph.sync_nodes()
@@ -132,6 +146,8 @@ def initialize_contest(self, benchmarks_version: int):
            self._stop_flag.wait(sleep_blocks * 12)
            return

+        self.initialize_apis(self.contest_state.submissions)
+
        self.wandb_manager.init_wandb(self.contest_state)
        logger.info(f"Starting a new contest with {len(self.contest_state.submissions)} submissions")

@@ -158,18 +174,7 @@ def do_step(self):
        benchmarking_results = [api.results() for api in self.benchmarking_apis]

        if any(result.state == BenchmarkState.NOT_STARTED for result in benchmarking_results):
-            for api in self.benchmarking_apis:
-                api.initialize(
-                    uid=self.uid,
-                    signature=self.signature,
-                    netuid=self.metagraph.netuid,
-                    substrate_url=self.substrate.url,
-                )
-            send_submissions_to_api(
-                version=self.validator_version,
-                all_apis=self.benchmarking_apis,
-                submissions=untested_submissions,
-            )
+            self.initialize_apis(untested_submissions)
            return

        self.update_benchmarks(benchmarking_results)
18 changes: 9 additions & 9 deletions validator/weight_setting/winner_selection.py
@@ -1,10 +1,12 @@
from operator import itemgetter
-from statistics import median

+import numpy
from base.checkpoint import Key, Submissions, Benchmarks
from base.contest import Metrics
from base.inputs_api import get_inputs_state

+DEVIATION_THRESHOLD_PERCENTILE = 96


def get_contestant_scores(
    submissions: Submissions,
@@ -34,14 +36,13 @@ def get_contestant_ranks(scores: dict[Key, float]) -> dict[Key, int]:
    scores = list(sorted(scores.items(), key=itemgetter(1), reverse=True))
    score_values = list(map(itemgetter(1), scores))

-    deviation = median(
+    deviations = numpy.array(list(
        score_values[i] - score_values[i + 1]
        for i in range(len(score_values) - 1)
-        if (
-            score_values[i + 1] > 0 and
-            score_values[i] - score_values[i + 1] > 0.0001
-        )
-    )
+        if score_values[i + 1] > 0
+    ))

+    threshold = numpy.percentile(deviations, DEVIATION_THRESHOLD_PERCENTILE)

    scores = iter(scores)

@@ -53,8 +54,7 @@ def get_contestant_ranks(scores: dict[Key, float]) -> dict[Key, int]:
        difference = last_score - score
        i += 1

-        if difference > deviation:
-            deviation = (deviation * i + difference) / i
+        if difference > threshold:
            rank += 1

        ranks[hotkey] = rank
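Taken together, these changes replace the running median-based deviation with a fixed threshold: the 96th percentile of the gaps between consecutive sorted scores, computed once up front with numpy, and a new rank group starts whenever a gap exceeds that threshold. A self-contained sketch of the same idea on made-up scores (a simplified illustration, not the validator's exact function):

import numpy

DEVIATION_THRESHOLD_PERCENTILE = 96

def rank_scores(scores: dict[str, float]) -> dict[str, int]:
    ordered = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    values = [value for _, value in ordered]

    # Gaps between consecutive scores, ignoring pairs where the lower score is non-positive.
    deviations = numpy.array([
        values[i] - values[i + 1]
        for i in range(len(values) - 1)
        if values[i + 1] > 0
    ])

    # A gap wider than the 96th-percentile gap starts a new rank group.
    threshold = numpy.percentile(deviations, DEVIATION_THRESHOLD_PERCENTILE)

    ranks, rank, last_value = {}, 0, values[0]
    for key, value in ordered:
        if last_value - value > threshold:
            rank += 1
        ranks[key] = rank
        last_value = value
    return ranks

print(rank_scores({"a": 0.92, "b": 0.91, "c": 0.90, "d": 0.70, "e": 0.69}))
# {'a': 0, 'b': 0, 'c': 0, 'd': 1, 'e': 1}: the 0.20 drop exceeds the
# 96th-percentile gap (~0.177), so d and e fall into a new rank group.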
