From 1221a4ea7f4952d41f155a458b008d2ef59348c9 Mon Sep 17 00:00:00 2001 From: Ashley Wright Date: Tue, 22 Oct 2024 03:49:51 +0300 Subject: [PATCH 01/10] Set single winner per tier that takes a higher percentage of the winnings --- validator/weight_setting/winner_selection.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/validator/weight_setting/winner_selection.py b/validator/weight_setting/winner_selection.py index ba9ea7f1..83b5e0c5 100644 --- a/validator/weight_setting/winner_selection.py +++ b/validator/weight_setting/winner_selection.py @@ -21,10 +21,12 @@ def get_scores(contestants: list[tuple[int, float]], node_count: int) -> list[fl if not contestants: return [] - _, last_tier_score = contestants[0] + last_uid, last_tier_score = contestants[0] scores = [0.0] * node_count - tier = 1 + winning_uids = [] + + uid = last_uid for contestant in contestants: uid, score = contestant @@ -32,8 +34,13 @@ def get_scores(contestants: list[tuple[int, float]], node_count: int) -> list[fl if score > last_tier_score * TIER_SCORE_IMPROVEMENT_THRESHOLD: # No longer in top threshold last_tier_score = score - tier += 1 + winning_uids.append(last_uid) + + last_uid = uid + + winning_uids.append(uid) - scores[uid] = (score + 1) ** (tier * 0.75) + for winner_index, uid in enumerate(reversed(winning_uids)): + scores[uid] = 0.5 * (0.5 ** winner_index) return scores From d2e7105e7851c85d7d32524ad52105e38a9ca6da Mon Sep 17 00:00:00 2001 From: Ashley Wright Date: Tue, 22 Oct 2024 04:56:49 +0300 Subject: [PATCH 02/10] Pick oldest UID of 7.5% sized tiers, remove deduplication --- miner/miner/submit.py | 1 - neuron/neuron/submission_tester/hash.py | 20 --------- neuron/neuron/submission_tester/metrics.py | 1 - neuron/neuron/submission_tester/testing.py | 42 ++--------------- validator/submission_tester/benchmarker.py | 1 - validator/weight_setting/deduplication.py | 47 -------------------- validator/weight_setting/validator.py | 24 +++------- validator/weight_setting/winner_selection.py | 31 ++++++++----- 8 files changed, 27 insertions(+), 140 deletions(-) delete mode 100644 neuron/neuron/submission_tester/hash.py delete mode 100644 validator/weight_setting/deduplication.py diff --git a/miner/miner/submit.py b/miner/miner/submit.py index 639cc435..a9ebe4cd 100644 --- a/miner/miner/submit.py +++ b/miner/miner/submit.py @@ -158,7 +158,6 @@ def start_benchmarking(submission: CheckpointSubmission): compare_checkpoints( ModelRepositoryInfo(url=submission.get_repo_link(), revision=submission.revision), - [], inputs, baseline, MODEL_DIRECTORY, diff --git a/neuron/neuron/submission_tester/hash.py b/neuron/neuron/submission_tester/hash.py deleted file mode 100644 index eab610d9..00000000 --- a/neuron/neuron/submission_tester/hash.py +++ /dev/null @@ -1,20 +0,0 @@ -from imagehash import ImageHash -from safetensors import numpy - - -__KEY = "DEFAULT" - - -GENERATION_TIME_DIFFERENCE_THRESHOLD = 0.02 - - -def save_image_hash(image_hash: ImageHash) -> bytes: - return numpy.save( - { - __KEY: image_hash.hash, - }, - ) - - -def load_image_hash(image_hash_bytes: bytes) -> ImageHash: - return ImageHash(numpy.load(image_hash_bytes)[__KEY]) diff --git a/neuron/neuron/submission_tester/metrics.py b/neuron/neuron/submission_tester/metrics.py index aadb3546..3b5d3035 100644 --- a/neuron/neuron/submission_tester/metrics.py +++ b/neuron/neuron/submission_tester/metrics.py @@ -25,7 +25,6 @@ class CheckpointBenchmark(BaseModel): model: MetricData average_similarity: float min_similarity: float - 
image_hash: bytes def calculate_score(self, baseline_metrics: MetricData) -> float: if self.min_similarity < SIMILARITY_SCORE_THRESHOLD: diff --git a/neuron/neuron/submission_tester/testing.py b/neuron/neuron/submission_tester/testing.py index e80a21ec..789f21d0 100644 --- a/neuron/neuron/submission_tester/testing.py +++ b/neuron/neuron/submission_tester/testing.py @@ -1,27 +1,21 @@ -import asyncio import logging +from collections.abc import Iterable from concurrent.futures import ThreadPoolExecutor, CancelledError from pathlib import Path from statistics import mean -from collections.abc import Iterable -from io import BytesIO from threading import Event from time import perf_counter -from .hash import load_image_hash, save_image_hash, GENERATION_TIME_DIFFERENCE_THRESHOLD -from .metrics import CheckpointBenchmark, MetricData, BaselineBenchmark -import imagehash -from PIL import Image - from neuron import ( GenerationOutput, ModelRepositoryInfo, CURRENT_CONTEST, Key, OutputComparator, ) -from .vram_monitor import VRamMonitor from pipelines import TextToImageRequest from .inference_sandbox import InferenceSandbox +from .metrics import CheckpointBenchmark, MetricData, BaselineBenchmark +from .vram_monitor import VRamMonitor SANDBOX_DIRECTORY = Path("/sandbox") BASELINE_SANDBOX_DIRECTORY = Path("/baseline-sandbox") @@ -99,7 +93,6 @@ def generate_baseline( def compare_checkpoints( submission: ModelRepositoryInfo, - existing_benchmarks: Iterable[tuple[Key, CheckpointBenchmark | None]], inputs: list[TextToImageRequest], baseline: BaselineBenchmark, sandbox_directory: Path = SANDBOX_DIRECTORY, @@ -114,8 +107,6 @@ def compare_checkpoints( with InferenceSandbox(submission, False, sandbox_directory, switch_user, cache) as sandbox: size = sandbox.model_size - image_hash = None - f"Take {len(inputs)} samples, keeping track of how fast/accurate generations have been" for index, request in enumerate(inputs): logger.info(f"Sample {index + 1}, prompt {request.prompt} and seed {request.seed}") @@ -125,32 +116,6 @@ def compare_checkpoints( output = generate(sandbox, request) - if not image_hash: - with BytesIO(output.output) as data: - image_hash = imagehash.average_hash(Image.open(data)) - - image_hash_bytes = save_image_hash(image_hash) - - match = next( - ( - (key, existing_benchmark) - for key, existing_benchmark in existing_benchmarks - if ( - existing_benchmark and - not (image_hash - load_image_hash(existing_benchmark.image_hash)) and - abs(output.generation_time - existing_benchmark.model.generation_time) < GENERATION_TIME_DIFFERENCE_THRESHOLD - ) - ), - None, - ) - - if match: - key, benchmark = match - - logger.info(f"Submission {submission} marked as duplicate of hotkey {key}'s submission") - - return benchmark - logger.info( f"Sample {index + 1} Generated\n" f"Generation Time: {output.generation_time}s\n" @@ -201,7 +166,6 @@ def calculate_similarity(comparator: OutputComparator, baseline_output: Generati ), average_similarity=average_similarity, min_similarity=min_similarity, - image_hash=image_hash_bytes, ) logger.info( diff --git a/validator/submission_tester/benchmarker.py b/validator/submission_tester/benchmarker.py index 61d3ca63..69a351bb 100644 --- a/validator/submission_tester/benchmarker.py +++ b/validator/submission_tester/benchmarker.py @@ -62,7 +62,6 @@ def _benchmark_key(self, hotkey: Key): try: self.benchmarks[hotkey] = compare_checkpoints( submission, - self.benchmarks.items(), self.inputs, self.baseline, cancelled_event=self.cancelled_event, diff --git 
a/validator/weight_setting/deduplication.py b/validator/weight_setting/deduplication.py deleted file mode 100644 index 5486f968..00000000 --- a/validator/weight_setting/deduplication.py +++ /dev/null @@ -1,47 +0,0 @@ -from dataclasses import dataclass -from collections.abc import Iterator - -from imagehash import ImageHash - -from neuron import GENERATION_TIME_DIFFERENCE_THRESHOLD - - -@dataclass -class PotentiallyDuplicateSubmissionInfo: - image_hash: ImageHash - generation_time: float - block: int - - -def find_duplicates(benchmark_info: list[PotentiallyDuplicateSubmissionInfo | None]) -> Iterator[tuple[int, int]]: - duplicate_buckets: list[set[int]] = [] - - for uid_a, benchmark_a in enumerate(benchmark_info): - if not benchmark_a: - continue - - for uid_b, benchmark_b in enumerate(benchmark_info): - if not benchmark_b: - continue - - if uid_a == uid_b: - continue - - if ( - not (benchmark_b.image_hash - benchmark_a.image_hash) - and abs(benchmark_b.generation_time - benchmark_a.generation_time) < GENERATION_TIME_DIFFERENCE_THRESHOLD - ): - matching_buckets = [bucket for bucket in duplicate_buckets if uid_a in bucket or uid_b in bucket] - if len(matching_buckets): - bucket = matching_buckets[0] - bucket.add(uid_a) - bucket.add(uid_b) - else: - duplicate_buckets.append({uid_a, uid_b}) - - for bucket in duplicate_buckets: - oldest = min(bucket, key=lambda uid: benchmark_info[uid].block) - - for uid in bucket: - if uid != oldest: - yield uid, oldest diff --git a/validator/weight_setting/validator.py b/validator/weight_setting/validator.py index ddb7d1d4..8bfefd15 100644 --- a/validator/weight_setting/validator.py +++ b/validator/weight_setting/validator.py @@ -42,14 +42,12 @@ BENCHMARKS_VERSION, ) from neuron.submission_tester import ( - load_image_hash, CheckpointBenchmark, MetricData, ) from .benchmarking_api import BenchmarkingApi, benchmarking_api -from .deduplication import find_duplicates, PotentiallyDuplicateSubmissionInfo from .wandb_args import add_wandb_args -from .winner_selection import get_scores, get_contestant_scores +from .winner_selection import get_scores, get_contestant_scores, get_tiers, pick_winners VALIDATOR_VERSION: tuple[int, int, int] = (4, 2, 0) VALIDATOR_VERSION_STRING = ".".join(map(str, VALIDATOR_VERSION)) @@ -123,8 +121,6 @@ class Validator: benchmarking_state: BenchmarkState failed: set[int] = set() # for backwards depickling compatibility invalid: dict[int, str] - hash_prompt: str - hash_seed: int contest: Contest def __init__(self): @@ -550,7 +546,10 @@ def set_weights(self): logger.info("Setting weights") - weights = get_scores(get_contestant_scores(self.benchmarks, self.baseline_metrics), len(self.metagraph.nodes)) + contestants = get_contestant_scores(self.benchmarks, self.baseline_metrics) + tiers = get_tiers(contestants) + winners = pick_winners(tiers, [info.block if info else None for info in self.contest_state.miner_info]) + weights = get_scores(winners, len(self.metagraph.nodes)) self.send_wandb_metrics() @@ -848,19 +847,6 @@ async def do_step(self, block: int): ) logger.info(self.benchmarks) - benchmark_duplicate_info = [ - PotentiallyDuplicateSubmissionInfo( - image_hash=load_image_hash(benchmark.image_hash), - generation_time=benchmark.model.generation_time, - block=self.contest_state.miner_info[uid].block, - ) if benchmark else None - for uid, benchmark in enumerate(self.benchmarks) - ] - - for duplicate_uid, original_uid in find_duplicates(benchmark_duplicate_info): - self.benchmarks[duplicate_uid] = None - self.invalid[duplicate_uid] = 
f"Duplicate of UID {original_uid}'s submission" - self.benchmarking = False self.step += 1 diff --git a/validator/weight_setting/winner_selection.py b/validator/weight_setting/winner_selection.py index 83b5e0c5..6235b9a0 100644 --- a/validator/weight_setting/winner_selection.py +++ b/validator/weight_setting/winner_selection.py @@ -1,8 +1,10 @@ from operator import itemgetter +from typing import cast +from neuron import Uid from neuron.submission_tester import CheckpointBenchmark, MetricData -TIER_SCORE_IMPROVEMENT_THRESHOLD = 1.05 +TIER_SCORE_IMPROVEMENT_THRESHOLD = 1.075 def get_contestant_scores(benchmarks: list[CheckpointBenchmark | None], baseline_metrics: MetricData): @@ -17,16 +19,13 @@ def get_contestant_scores(benchmarks: list[CheckpointBenchmark | None], baseline return sorted_contestants -def get_scores(contestants: list[tuple[int, float]], node_count: int) -> list[float]: +def get_tiers(contestants: list[tuple[Uid, float]]) -> list[list[Uid]]: if not contestants: return [] - last_uid, last_tier_score = contestants[0] + _, last_tier_score = contestants[0] - scores = [0.0] * node_count - winning_uids = [] - - uid = last_uid + tiers = [[]] for contestant in contestants: uid, score = contestant @@ -34,13 +33,21 @@ def get_scores(contestants: list[tuple[int, float]], node_count: int) -> list[fl if score > last_tier_score * TIER_SCORE_IMPROVEMENT_THRESHOLD: # No longer in top threshold last_tier_score = score - winning_uids.append(last_uid) + tiers.append([]) + + tiers[-1].append(uid) + + return tiers - last_uid = uid - winning_uids.append(uid) +def pick_winners(tiers: list[list[Uid]], blocks: list[int | None]) -> list[Uid]: + return [cast(int, min(tier, key=blocks.__getitem__)) for tier in tiers] + + +def get_scores(winners: list[Uid], node_count: int) -> list[float]: + scores = [0.0] * node_count - for winner_index, uid in enumerate(reversed(winning_uids)): - scores[uid] = 0.5 * (0.5 ** winner_index) + for index, winner in enumerate(reversed(winners)): + scores[winner] = 0.5 * (0.5 ** index) return scores From c7da0d1d5b8d491324807496657375f91c225f25 Mon Sep 17 00:00:00 2001 From: Ashley Wright Date: Tue, 22 Oct 2024 19:20:57 +0300 Subject: [PATCH 03/10] Share reward pool more evenly outside of winner --- validator/weight_setting/validator.py | 4 ++-- validator/weight_setting/winner_selection.py | 22 +++++++++++++++++--- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/validator/weight_setting/validator.py b/validator/weight_setting/validator.py index 8bfefd15..22586bb2 100644 --- a/validator/weight_setting/validator.py +++ b/validator/weight_setting/validator.py @@ -548,8 +548,8 @@ def set_weights(self): contestants = get_contestant_scores(self.benchmarks, self.baseline_metrics) tiers = get_tiers(contestants) - winners = pick_winners(tiers, [info.block if info else None for info in self.contest_state.miner_info]) - weights = get_scores(winners, len(self.metagraph.nodes)) + blocks = [info.block if info else None for info in self.contest_state.miner_info] + weights = get_scores(tiers, blocks, len(self.metagraph.nodes)) self.send_wandb_metrics() diff --git a/validator/weight_setting/winner_selection.py b/validator/weight_setting/winner_selection.py index 6235b9a0..9b8a4d03 100644 --- a/validator/weight_setting/winner_selection.py +++ b/validator/weight_setting/winner_selection.py @@ -1,3 +1,4 @@ +from itertools import chain from operator import itemgetter from typing import cast @@ -44,10 +45,25 @@ def pick_winners(tiers: list[list[Uid]], blocks: list[int | None]) -> 
list[Uid]: return [cast(int, min(tier, key=blocks.__getitem__)) for tier in tiers] -def get_scores(winners: list[Uid], node_count: int) -> list[float]: +def get_scores(tiers: list[list[Uid]], blocks: list[int | None], node_count: int) -> list[float]: + ordered_tiers = [ + sorted(tier, key=blocks.__getitem__) for tier in tiers + ] + + max_len = max(len(tier) for tier in ordered_tiers) + + sorted_winners = list( + chain.from_iterable( + [tier[i] for tier in ordered_tiers if i < len(tier)] + for i in range(max_len) + ) + ) + scores = [0.0] * node_count - for index, winner in enumerate(reversed(winners)): - scores[winner] = 0.5 * (0.5 ** index) + scores[sorted_winners[0]] = 0.5 + + for uid in sorted_winners[1:]: + scores[uid] = 0.5 / (len(sorted_winners) - 1) return scores From 139cd801c9fcec8e146969a64a1bddd0575b50da Mon Sep 17 00:00:00 2001 From: Ashley Wright Date: Tue, 22 Oct 2024 19:28:05 +0300 Subject: [PATCH 04/10] Bump version --- validator/weight_setting/validator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/weight_setting/validator.py b/validator/weight_setting/validator.py index 22586bb2..b2cffc02 100644 --- a/validator/weight_setting/validator.py +++ b/validator/weight_setting/validator.py @@ -49,7 +49,7 @@ from .wandb_args import add_wandb_args from .winner_selection import get_scores, get_contestant_scores, get_tiers, pick_winners -VALIDATOR_VERSION: tuple[int, int, int] = (4, 2, 0) +VALIDATOR_VERSION: tuple[int, int, int] = (4, 3, 0) VALIDATOR_VERSION_STRING = ".".join(map(str, VALIDATOR_VERSION)) WEIGHTS_VERSION = ( From c924097de87e07d4d958928a5d07ed11ae7d9685 Mon Sep 17 00:00:00 2001 From: Ashley Wright Date: Tue, 22 Oct 2024 20:56:54 +0300 Subject: [PATCH 05/10] Fix miner bugs --- miner/miner/submit.py | 1 + neuron/neuron/submission_tester/testing.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/miner/miner/submit.py b/miner/miner/submit.py index a9ebe4cd..3c04c2ae 100644 --- a/miner/miner/submit.py +++ b/miner/miner/submit.py @@ -146,6 +146,7 @@ def start_benchmarking(submission: CheckpointSubmission): baseline = generate_baseline( inputs, BASELINE_MODEL_DIRECTORY, + switch_user=False, cache=True, ) save_baseline_cache(baseline) diff --git a/neuron/neuron/submission_tester/testing.py b/neuron/neuron/submission_tester/testing.py index 789f21d0..9f2cb14c 100644 --- a/neuron/neuron/submission_tester/testing.py +++ b/neuron/neuron/submission_tester/testing.py @@ -132,7 +132,7 @@ def compare_checkpoints( with CURRENT_CONTEST.output_comparator() as output_comparator: def calculate_similarity(comparator: OutputComparator, baseline_output: GenerationOutput, optimized_output: GenerationOutput): try: - if cancelled_event.is_set(): + if cancelled_event and cancelled_event.is_set(): raise CancelledError() return comparator( From 3f6caa52448d574d892a697766bac8ce19e10df3 Mon Sep 17 00:00:00 2001 From: Ashley Wright Date: Tue, 22 Oct 2024 21:18:31 +0300 Subject: [PATCH 06/10] Set older submissions back a tier --- validator/weight_setting/validator.py | 2 +- validator/weight_setting/winner_selection.py | 37 +++++++++++--------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/validator/weight_setting/validator.py b/validator/weight_setting/validator.py index b2cffc02..678aa556 100644 --- a/validator/weight_setting/validator.py +++ b/validator/weight_setting/validator.py @@ -47,7 +47,7 @@ ) from .benchmarking_api import BenchmarkingApi, benchmarking_api from .wandb_args import add_wandb_args -from .winner_selection 
import get_scores, get_contestant_scores, get_tiers, pick_winners +from .winner_selection import get_scores, get_contestant_scores, get_tiers VALIDATOR_VERSION: tuple[int, int, int] = (4, 3, 0) VALIDATOR_VERSION_STRING = ".".join(map(str, VALIDATOR_VERSION)) diff --git a/validator/weight_setting/winner_selection.py b/validator/weight_setting/winner_selection.py index 9b8a4d03..2cf46b24 100644 --- a/validator/weight_setting/winner_selection.py +++ b/validator/weight_setting/winner_selection.py @@ -1,11 +1,10 @@ -from itertools import chain from operator import itemgetter -from typing import cast from neuron import Uid from neuron.submission_tester import CheckpointBenchmark, MetricData TIER_SCORE_IMPROVEMENT_THRESHOLD = 1.075 +WINNER_PERCENTAGE = 0.5 def get_contestant_scores(benchmarks: list[CheckpointBenchmark | None], baseline_metrics: MetricData): @@ -41,29 +40,35 @@ def get_tiers(contestants: list[tuple[Uid, float]]) -> list[list[Uid]]: return tiers -def pick_winners(tiers: list[list[Uid]], blocks: list[int | None]) -> list[Uid]: - return [cast(int, min(tier, key=blocks.__getitem__)) for tier in tiers] - - def get_scores(tiers: list[list[Uid]], blocks: list[int | None], node_count: int) -> list[float]: + if not tiers: + return [1.0] * node_count + ordered_tiers = [ sorted(tier, key=blocks.__getitem__) for tier in tiers ] - max_len = max(len(tier) for tier in ordered_tiers) + modified_tiers = [] + + last_tier = None + + for tier in ordered_tiers: + if last_tier: + modified_tiers.append([tier[0], *last_tier[1:]]) + else: + modified_tiers.append([tier[0]]) + + last_tier = tier - sorted_winners = list( - chain.from_iterable( - [tier[i] for tier in ordered_tiers if i < len(tier)] - for i in range(max_len) - ) - ) + modified_tiers.append(last_tier[1:]) scores = [0.0] * node_count - scores[sorted_winners[0]] = 0.5 + for index, tier in enumerate(modified_tiers): + incentive_pool = WINNER_PERCENTAGE * ((1 - WINNER_PERCENTAGE) ** index) + score = incentive_pool / len(tier) - for uid in sorted_winners[1:]: - scores[uid] = 0.5 / (len(sorted_winners) - 1) + for uid in tier: + scores[uid] = score return scores From a4fda07a7ce2c973540cb965313ea92a08e81b1d Mon Sep 17 00:00:00 2001 From: Ashley Wright Date: Tue, 22 Oct 2024 21:19:49 +0300 Subject: [PATCH 07/10] Fix comment --- validator/weight_setting/winner_selection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/weight_setting/winner_selection.py b/validator/weight_setting/winner_selection.py index 2cf46b24..cd0355a0 100644 --- a/validator/weight_setting/winner_selection.py +++ b/validator/weight_setting/winner_selection.py @@ -31,7 +31,7 @@ def get_tiers(contestants: list[tuple[Uid, float]]) -> list[list[Uid]]: uid, score = contestant if score > last_tier_score * TIER_SCORE_IMPROVEMENT_THRESHOLD: - # No longer in top threshold + # New tier last_tier_score = score tiers.append([]) From c13592df7764645719a0991bd029e89d5d519a61 Mon Sep 17 00:00:00 2001 From: Ashley Wright Date: Tue, 22 Oct 2024 21:22:59 +0300 Subject: [PATCH 08/10] Change threshold back to 5% --- validator/weight_setting/winner_selection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/weight_setting/winner_selection.py b/validator/weight_setting/winner_selection.py index cd0355a0..78a6fc11 100644 --- a/validator/weight_setting/winner_selection.py +++ b/validator/weight_setting/winner_selection.py @@ -3,7 +3,7 @@ from neuron import Uid from neuron.submission_tester import CheckpointBenchmark, MetricData 
-TIER_SCORE_IMPROVEMENT_THRESHOLD = 1.075
+TIER_SCORE_IMPROVEMENT_THRESHOLD = 1.05
 WINNER_PERCENTAGE = 0.5
 
 

From 71ec5764a623b7e9ab803e2b05ac05de7ca97a44 Mon Sep 17 00:00:00 2001
From: Ashley Wright
Date: Tue, 22 Oct 2024 21:32:56 +0300
Subject: [PATCH 09/10] Remove bad import

---
 neuron/neuron/submission_tester/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/neuron/neuron/submission_tester/__init__.py b/neuron/neuron/submission_tester/__init__.py
index 3a1d68e9..d20d353a 100644
--- a/neuron/neuron/submission_tester/__init__.py
+++ b/neuron/neuron/submission_tester/__init__.py
@@ -3,4 +3,3 @@
 from .testing import *
 from .metrics import *
 from .vram_monitor import *
-from .hash import *
\ No newline at end of file

From a0173bc1d21e8abd3365feec7f46cb22ff429618 Mon Sep 17 00:00:00 2001
From: Ashley Wright
Date: Tue, 22 Oct 2024 21:35:30 +0300
Subject: [PATCH 10/10] Remove unused imports

---
 neuron/neuron/submission_tester/testing.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/neuron/neuron/submission_tester/testing.py b/neuron/neuron/submission_tester/testing.py
index 2dcfbba3..161b0d04 100644
--- a/neuron/neuron/submission_tester/testing.py
+++ b/neuron/neuron/submission_tester/testing.py
@@ -1,7 +1,5 @@
 import logging
-from collections.abc import Iterable
 from concurrent.futures import ThreadPoolExecutor, CancelledError
-from io import BytesIO
 from pathlib import Path
 from statistics import mean
 from threading import Event
@@ -16,13 +14,8 @@
     GenerationOutput,
     ModelRepositoryInfo,
     CURRENT_CONTEST,
-    Key,
     OutputComparator,
 )
-from pipelines import TextToImageRequest
-from .inference_sandbox import InferenceSandbox
-from .metrics import CheckpointBenchmark, MetricData, BaselineBenchmark
-from .vram_monitor import VRamMonitor
 
 SANDBOX_DIRECTORY = Path("/sandbox")
 BASELINE_SANDBOX_DIRECTORY = Path("/baseline-sandbox")
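
Illustrative addendum (editor's note, not part of any patch above): the short script below hand-traces the winner-selection behaviour that patches 03, 06 and 08 converge on. It assumes the validator package from this repository is importable under the path shown; the UIDs, scores and block numbers are invented purely for illustration, and the expected values in the comments come from tracing the patched code by hand rather than from running a validator.

# Toy walkthrough of the tier/score logic introduced by patches 03-08.
# All data below is made up; only the two imported functions are real.
from validator.weight_setting.winner_selection import get_tiers, get_scores

# (uid, score) pairs as produced by get_contestant_scores; a score more than
# 5% above the current tier's baseline opens a new tier.
contestants = [(3, 1.00), (7, 1.03), (1, 1.10), (9, 1.30), (5, 1.32)]

tiers = get_tiers(contestants)
print(tiers)  # [[3, 7], [1], [9, 5]] - 1.10 and 1.30 each clear the 5% threshold

# blocks[uid] is the submission block of that UID (None where no benchmark exists);
# the oldest (lowest-block) member of each tier is promoted to the higher-paying slot.
blocks = [None, 500, None, 100, None, 650, None, 900, None, 700]

scores = get_scores(tiers, blocks, node_count=10)
print(scores)
# Expected: uid 3 (oldest UID of the first tier) takes the full 0.5 pool,
# uids 1 and 7 split the next 0.25 pool (0.125 each), uid 5 takes the 0.125
# pool alone, uid 9 gets 0.0625, and every other UID stays at 0.0.

Two properties are easy to verify with this sketch: the per-group pools follow WINNER_PERCENTAGE * (1 - WINNER_PERCENTAGE) ** index, so the returned scores sum to 1 - 0.5 ** number_of_groups rather than exactly 1.0; and if the final tier returned by get_tiers holds a single UID, the trailing last_tier[1:] group in get_scores (as written in patch 06) is empty and the pool division would raise ZeroDivisionError, an edge case that may be worth a follow-up patch.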