Skip to content

Commit

Permalink
Fix log
Browse files · Browse the repository at this point in the history
  • Loading branch information
MsRandom committed Oct 22, 2024
1 parent 8883abf commit c7e20ba
Showing 1 changed file with 12 additions and 11 deletions.
23 changes: 12 additions & 11 deletions neuron/neuron/submission_tester/testing.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor, CancelledError
from pathlib import Path
Expand All @@ -17,7 +16,9 @@
GenerationOutput,
ModelRepositoryInfo,
CURRENT_CONTEST,
Key, OutputComparator, InvalidSubmissionError,
Key,
OutputComparator,
InvalidSubmissionError,
)
from .vram_monitor import VRamMonitor
from pipelines import TextToImageRequest
Expand Down Expand Up @@ -151,17 +152,17 @@ def compare_checkpoints(
logger.info(f"Submission {submission} marked as duplicate of hotkey {key}'s submission")

return benchmark
except Exception as e:
raise InvalidSubmissionError(f"Failed to run inference on {submission}") from e

logger.info(
f"Sample {index + 1} Generated\n"
f"Generation Time: {output.generation_time}s\n"
f"VRAM Usage: {output.vram_used}b\n"
f"Power Usage: {output.watts_used}W"
)
logger.info(
f"Sample {index + 1} Generated\n"
f"Generation Time: {output.generation_time}s\n"
f"VRAM Usage: {output.vram_used}b\n"
f"Power Usage: {output.watts_used}W"
)

outputs.append(output)
outputs.append(output)
except Exception as e:
raise InvalidSubmissionError(f"Failed to run inference on {submission}") from e

average_time = sum(output.generation_time for output in outputs) / len(outputs)
vram_used = max(output.vram_used for output in outputs)
Expand Down

0 comments on commit c7e20ba

Please sign in to comment.