
Commit

Maintenance
SK-la committed Nov 2, 2024
1 parent 670f63f commit 3984372
Showing 8 changed files with 155 additions and 156 deletions.
13 changes: 4 additions & 9 deletions bin/Dispatch_data.py
@@ -1,6 +1,4 @@
#bin/Dispatch_data.py
import os
import json
from bin.config import get_config
from bin.get_info import get_info
from bin.conv_bmson import bms
@@ -28,12 +26,11 @@ def dispatch(data, settings):
sv = get_sv(data, offset, info, settings) if settings.convert_sv else ''
osu_content = generate_osu_file(config, info, sv, offset, samples, song_lg, notes_obj, new_cs)

print(f"New Folder Name: {info.new_folder}")
print(f"Sub Folder Name: {info.sub_folder}")
logger.info(f"New Folder Name: {info.new_folder}")
logger.info(f"Sub Folder Name: {info.sub_folder}")
logger.info(f"Osu Filename: {info.osu_filename}")
logger.info(f"Img Filename: {info.img_filename}")
print(f"Osu Filename: {info.osu_filename}")
print(f"Img Filename: {info.img_filename}")
print("调度转换完成")

return osu_content, info, main_audio

def scan_folder(folder_path):
@@ -48,5 +45,3 @@ def scan_folder(folder_path):
files.append(file_path)
return files

def print_collapsed(content, indent=2):
print(json.dumps(content, indent=indent))
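For orientation, dispatch() takes the parsed bmson dictionary plus a settings object and returns the generated .osu text along with the naming info and the main audio file, while scan_folder() lists every file next to the chart. A minimal, hypothetical call sketch (the chart path and the SimpleNamespace flags are assumptions for illustration, not part of this commit):

import json, pathlib
from types import SimpleNamespace
from bin.Dispatch_data import dispatch, scan_folder

# Hypothetical chart location; the flags mirror the settings attributes read by the converter
chart = pathlib.Path("input_bms/song/chart.bmson")
settings = SimpleNamespace(convert_sv=True, include_audio=True, include_images=True)

data = json.loads(chart.read_text(encoding="utf-8"))
data['input_file_path'] = str(chart)  # same key Dispatch_file.py adds before dispatching
osu_content, info, main_audio = dispatch(data, settings)
print(len(scan_folder(chart.parent)), "files found next to the chart")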
108 changes: 50 additions & 58 deletions bin/Dispatch_file.py
@@ -1,43 +1,16 @@
#bin/Dispatch_file.py
import shutil
import json
import pathlib
import urllib.parse
import asyncio
import aiofiles
import shutil, json, pathlib
import asyncio, aiofiles
from bin.Dispatch_data import dispatch, scan_folder
from bin.custom_log import setup_custom_logger

logger = setup_custom_logger(__name__)
cache_lock = asyncio.Lock()
semaphore = asyncio.Semaphore(100) # limit the number of concurrent tasks

def encode_filename(filename):
return urllib.parse.quote(filename, safe='')

def decode_filename(encoded_filename):
return urllib.parse.unquote(encoded_filename)

def get_unique_filename(existing_path):
base, extension = existing_path.stem, existing_path.suffix
counter = 1
while existing_path.exists():
existing_path = existing_path.with_name(f"{base}_old_{counter}{extension}")
counter += 1
return existing_path

async def copy_file_with_cache(file_path, destination_path, cache):
async with cache_lock:
if str(file_path) in cache:
return # skip files that have already been processed
cache[str(file_path)] = True # update the cache
shutil.copy(file_path, destination_path)

async def process_file(bmson_file, output_folder, settings, cache, error_list):
async def process_file(bmson_file, output_folder, settings, error_list):
try:
output_folder = pathlib.Path(output_folder)
async with aiofiles.open(bmson_file, 'r', encoding='utf-8') as file:
data = json.loads(await file.read())
data['input_file_path'] = str(bmson_file) # add the source file path to the data

osu_content, info, main_audio = dispatch(data, settings)

@@ -46,41 +19,60 @@ async def process_file(bmson_file, output_folder, settings, cache, error_list):
song_folder.mkdir(parents=True, exist_ok=True)
sub_folder = song_folder / info.sub_folder
sub_folder.mkdir(parents=True, exist_ok=True)
output_folder = song_folder

osu_file_path = song_folder / f"{info.osu_filename}.osu"
async with aiofiles.open(osu_file_path, 'w', encoding='utf-8') as file:
await file.write(osu_content)

# Collect the file names already present in the target folders
existing_file_names_song = await get_existing_file_names(song_folder)
existing_file_names_sub = await get_existing_file_names(sub_folder)
tasks = {
"large": [],
"small": []
}

files = scan_folder(bmson_file.parent)

# First compare all files and decide which copies to skip
for file_path in files:
file_path = pathlib.Path(file_path)
if settings.include_audio and file_path.suffix in {'.mp3', '.wav', '.ogg', '.wmv', '.mp4', '.avi'}:
destination_path = output_folder / file_path.name
if not destination_path.exists():
await copy_file_with_cache(file_path, destination_path, cache)
elif settings.include_images and file_path.suffix in {'.jpg', '.png'} and file_path.stem == pathlib.Path(info.image).stem:
destination_path = output_folder / file_path.name
shutil.copy(file_path, destination_path)
elif file_path.suffix in {'.wmv', '.mp4', '.avi'} and file_path.stem == pathlib.Path(info.vdo).stem:
destination_path = output_folder / file_path.name
shutil.copy(file_path, destination_path)
else:
sub_folder_path = output_folder / info.sub_folder
if not sub_folder_path.exists():
sub_folder_path.mkdir(parents=True, exist_ok=True)
destination_path = sub_folder_path / file_path.name
shutil.copy(file_path, destination_path)
if settings.include_images and file_path.suffix in {'.jpg', '.png'} and file_path.stem == pathlib.Path(
info.image).stem:
tasks["small"].append(copy_if_not_exists(file_path, song_folder / f"{info.img_filename}", existing_file_names_song))

osu_file_path = output_folder / f"{info.osu_filename}.osu"
async with aiofiles.open(osu_file_path, 'w', encoding='utf-8') as file:
await file.write(osu_content)
if settings.include_audio:
if file_path.suffix in {'.mp3', '.wav', '.ogg'} and file_path.stem == pathlib.Path(main_audio).stem:
tasks["large"].append(copy_if_not_exists(file_path, song_folder / f"{info.song}", existing_file_names_song))
elif file_path.suffix in {'.wmv', '.mp4', '.avi'} and file_path.stem == pathlib.Path(info.vdo).stem:
tasks["large"].append(copy_if_not_exists(file_path, song_folder / f"{info.vdo}", existing_file_names_song))
elif file_path.suffix in {'.mp3', '.wav', '.ogg'} and file_path.stem != pathlib.Path(main_audio).stem:
if not await compare_file_names(existing_file_names_sub, (sub_folder / file_path.name).name):
tasks["small"].append(copy_if_not_exists(file_path, sub_folder / file_path.name, existing_file_names_sub))
# Execute all queued copy tasks in batches
await asyncio.gather(*tasks["small"])
await asyncio.gather(*tasks["large"])
return info

except Exception as e:
error_list.append((bmson_file, str(e)))
return None

# def get_unique_filename(existing_path):
# base, extension = existing_path.stem, existing_path.suffix
# counter = 1
# while existing_path.exists():
# existing_path = existing_path.with_name(f"{base}_old_{counter}{extension}")
# counter += 1
# return existing_path
async def copy_if_not_exists(file_path, destination_path, existing_file_names):
if not await compare_file_names(existing_file_names, destination_path.name):
await copy_file(file_path, destination_path)
else:
logger.info(f"文件 {destination_path} 已存在,跳过复制")

async def copy_file(file_path, destination_path):
try:
shutil.copy(file_path, destination_path)
logger.info(f"文件 {file_path} 成功复制到 {destination_path}")
except Exception as e:
logger.error(f"复制文件 {file_path}{destination_path} 时出错: {e}")

async def get_existing_file_names(folder_path):
return {file.name for file in folder_path.iterdir() if file.is_file()}

async def compare_file_names(existing_file_names, file_name):
return file_name in existing_file_names
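The helpers above replace the old per-file cache: the destination folder is listed once with get_existing_file_names(), and copy_if_not_exists() checks that set before copying. A minimal standalone sketch of the pattern (the paths are hypothetical, for illustration only):

import asyncio, pathlib
from bin.Dispatch_file import copy_if_not_exists, get_existing_file_names

async def demo():
    src = pathlib.Path("input_bms/song/piano_01.wav")      # hypothetical keysound file
    dst_folder = pathlib.Path("output_osu/song/keysound")  # hypothetical sub folder
    dst_folder.mkdir(parents=True, exist_ok=True)
    existing = await get_existing_file_names(dst_folder)   # names already in the folder
    await copy_if_not_exists(src, dst_folder / src.name, existing)

asyncio.run(demo())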
36 changes: 18 additions & 18 deletions bin/Samples.py
@@ -1,36 +1,35 @@
#Samples.py
from bin.custom_log import setup_custom_logger

logger = setup_custom_logger(__name__)

def get_samples(data, info, settings):
def calculate_pulse_time(y):
return round(y * info.MpB )

main_audio = None
y_start = 0
y_end = 0
main_audio = None
samples = []
zero_x_notes = []
all_notes = []

for channel in data['sound_channels']:
for note in channel['notes']:
if note['x'] == 0:
zero_x_notes.append(note)
if len(zero_x_notes) == 1:
main_audio = channel['name']
y_start = note['y']
elif len(zero_x_notes) == 2:
y_end = note['y']
break
if len(zero_x_notes) == 2:
break
zero_x_notes = zero_x_notes[:2]
all_notes.append(note)
if note['x'] == 0 and main_audio is None:
main_audio = channel['name']

# Sort by y value
all_notes.sort(key=lambda note: note['y'])

[Qodana notice on line 21 of bin/Samples.py — Shadowing names from outer scopes: shadows name 'note' from outer scope]

y_start = all_notes[0]['y'] if all_notes else 0
y_end = all_notes[-1]['y'] if all_notes else 0

song_lg = round((y_end - y_start) * info.MpB)

valid_notes = []
for channel in data['sound_channels']:
hs = channel['name'].replace("sound\\", f"{info.sub_folder}/")
for note in channel['notes']:
if note['x'] == 0 and note not in zero_x_notes:
if 1 <= note['x'] <= 16:
valid_notes.append((note['y'], hs))

# Sort by y value
@@ -45,7 +44,8 @@ def calculate_pulse_time(y):
for y, hs in valid_notes:
note_time = calculate_pulse_time(y) + offset
samples.append(f"5,{note_time},0,\"{hs}\"")

print(f"\n脉冲: Start: {y_start}, End: {y_end}, Song Length: {song_lg}, Offset: {offset}")

print(f"脉冲: Start: {y_start}, End: {y_end}, Song Length: {song_lg}, Offset: {offset}")
logger.info(f"脉冲: Start: {y_start}, End: {y_end}, Song Length: {song_lg}, Offset: {offset}")
return samples, main_audio, offset, song_lg
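For reference, the timing above maps a note's pulse position y to milliseconds via info.MpB and then adds the offset. A standalone sketch of that arithmetic, assuming MpB is milliseconds per pulse (the numbers are illustrative, not taken from the repository):

def pulse_to_ms(y: int, mpb: float, offset: int) -> int:
    # Same arithmetic as calculate_pulse_time(y) + offset above
    return round(y * mpb) + offset

print(pulse_to_ms(960, 1.25, 37))  # 960 pulses at 1.25 ms each, plus a 37 ms offset -> 1237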

64 changes: 21 additions & 43 deletions bin/aio.py
@@ -1,65 +1,44 @@
#bin/aio.py
import asyncio
import pathlib
import json
import os
import aiofiles
import shutil
import datetime
import asyncio, aiofiles
import datetime, pathlib
from bin.Dispatch_file import process_file
from bin.custom_log import setup_custom_logger
from bin.io_utils import find_duplicate_files

logger = setup_custom_logger(__name__)
cache_lock = asyncio.Lock()
semaphore = asyncio.Semaphore(10) # limit the number of concurrent tasks
semaphore = asyncio.Semaphore(4000) # limit the number of concurrent tasks

async def load_cache(cache_file_path):
if os.path.exists(cache_file_path):
async with aiofiles.open(cache_file_path, 'r', encoding='utf-8') as file:
return json.loads(await file.read())
return {}

async def save_cache(cache_file_path, cache_data):
async with aiofiles.open(cache_file_path, 'w', encoding='utf-8') as file:
await file.write(json.dumps(cache_data))

async def copy_file_with_cache(file_path, destination_path, cache):
async with cache_lock:
if str(file_path) in cache:
return # skip files that have already been processed
cache[str(file_path)] = True # update the cache
shutil.copy(file_path, destination_path)

async def process_sound_folder(sound_folder, output_folder, cache):
for sound_file in sound_folder.glob("*.*"):
destination_path = output_folder / sound_file.name
await copy_file_with_cache(sound_file, destination_path, cache)

async def process_folder(folder_path, output_folder_path, settings, cache, error_list):
async def process_folder(folder_path, output_folder_path, settings, error_list):
try:
sound_folder = folder_path / "sound"
if sound_folder.exists():
await process_sound_folder(sound_folder, output_folder_path, cache)

for bmson_file in folder_path.glob("*.bmson"):
async with semaphore:
await process_file(bmson_file, output_folder_path, settings, cache, error_list)
try:
await process_file(bmson_file, output_folder_path, settings, error_list)
except Exception as e:
error_list.append((bmson_file, str(e)))
logger.error(f"Error processing file {bmson_file}: {e}")
except Exception as e:
error_list.append((folder_path, str(e)))
logger.error(f"Error processing folder {folder_path}: {e}")

async def start_conversion(input_folder_path, output_folder_path, settings, cache_file_path):
async def start_conversion(input_folder_path, output_folder_path, settings):
input_folder_path = pathlib.Path(input_folder_path)
output_folder_path = pathlib.Path(output_folder_path)
cache = await load_cache(cache_file_path)
error_list = []

# Find duplicate files
duplicates = await find_duplicate_files(input_folder_path)
if duplicates:
logger.info("Found duplicate files:")
for original, duplicate in duplicates:
logger.info(f"Original: {original}, Duplicate: {duplicate}")

tasks = []
for folder in input_folder_path.iterdir():
if folder.is_dir():
tasks.append(process_folder(folder, output_folder_path, settings, cache, error_list))
tasks.append(process_folder(folder, output_folder_path, settings, error_list))

await asyncio.gather(*tasks)
await save_cache(cache_file_path, cache)

# Summarize the files or folders that failed
if error_list:
@@ -72,5 +51,4 @@ async def start_conversion(input_folder_path, output_folder_path, settings, cach
await file.write("以下文件或文件夹处理时出错:\n")
for folder, error in error_list:
await file.write(f"{folder}: {error}\n")
logger.error(f"错误日志已保存到 {error_log_file}")

logger.error(f"错误日志已保存到 {error_log_file}")
45 changes: 33 additions & 12 deletions bin/custom_log.py
@@ -5,39 +5,60 @@
import asyncio
from typing import Any, Dict


class CustomFormatter(logging.Formatter):
def format(self, record):
if not hasattr(record, 'pastime'):
record.pastime = 'N/A'
return super().format(record)


def setup_custom_logger(name):
formatter = CustomFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(pastime)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)

# Console handler
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(logging.WARNING) # only show WARNING and above on the console

# File handler
log_folder = pathlib.Path("log")
log_folder.mkdir(exist_ok=True) # make sure the log folder exists
file_handler = logging.FileHandler(log_folder / "true_log.txt", encoding='utf-8')
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG) # record messages of every level

logger = logging.getLogger(name)
logger.addHandler(handler)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)
return logger


def handle_exception(_: asyncio.AbstractEventLoop, context: Dict[str, Any]) -> None:
# Get the exception details
exception = context.get("exception")
message = context.get("message", "Unhandled exception")

# Write the exception details to an error log file
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
error_log_file = pathlib.Path(f"error_log_{timestamp}.txt")
with open(error_log_file, 'a', encoding='utf-8') as file:
file.write(f"{timestamp} - {message}\n")
if exception or message != "Unhandled exception":
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
error_log_file = pathlib.Path(f"log/error_log_{timestamp}.txt")
with open(error_log_file, 'a', encoding='utf-8') as file:
file.write(f"{timestamp} - {message}\n")
if exception:
file.write(f"{timestamp} - {exception}\n")

# Print the exception to the console
logging.error(f"{timestamp} - {message}")
if exception:
file.write(f"{exception}\n")
logging.error(f"{timestamp} - {exception}")

# Print the exception to the console
logging.error(f"{timestamp} - {message}")
if exception:
logging.error(exception)

# Install the global exception handler
loop = asyncio.get_event_loop()
loop.set_exception_handler(handle_exception)

# def sanitize_filename(filename):
# # Replace illegal characters with underscores
# return re.sub(r'[<>:"/\\|?*]', '_', filename)
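With the two handlers configured above, DEBUG and INFO messages go only to log/true_log.txt, while WARNING and above also reach the console. A minimal usage sketch (the call site is assumed, not shown in this commit):

from bin.custom_log import setup_custom_logger

logger = setup_custom_logger(__name__)
logger.debug("written to log/true_log.txt only")
logger.warning("written to the file and shown on the console")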
