diff --git a/crates/voicevox_core/src/devices.rs b/crates/voicevox_core/src/devices.rs
index 654e8b981..634f5483f 100644
--- a/crates/voicevox_core/src/devices.rs
+++ b/crates/voicevox_core/src/devices.rs
@@ -5,7 +5,7 @@ use std::{
 };
 
 use derive_more::BitAnd;
-use serde::{Deserialize, Serialize};
+use serde::Serialize;
 
 pub(crate) fn test_gpus(
     gpus: impl IntoIterator,
@@ -65,7 +65,8 @@ fn test_gpu(
 /// # Ok(())
 /// # }
 /// ```
-#[derive(Clone, Copy, PartialEq, Eq, Debug, BitAnd, Serialize, Deserialize)]
+// 互換性保証のため、`Deserialize`は実装するべきではない
+#[derive(Clone, Copy, PartialEq, Eq, Debug, BitAnd, Serialize)]
 #[non_exhaustive]
 pub struct SupportedDevices {
     /// CPUが利用可能。
diff --git a/crates/voicevox_core_c_api/tests/e2e/testcases/compatible_engine.rs b/crates/voicevox_core_c_api/tests/e2e/testcases/compatible_engine.rs
index 6e31f557e..6b5faa11d 100644
--- a/crates/voicevox_core_c_api/tests/e2e/testcases/compatible_engine.rs
+++ b/crates/voicevox_core_c_api/tests/e2e/testcases/compatible_engine.rs
@@ -1,12 +1,12 @@
 // エンジンを起動してyukarin_s・yukarin_sa・decodeの推論を行う
 
+use std::collections::HashMap;
 use std::sync::LazyLock;
 use std::{cmp::min, ffi::CStr};
 
 use assert_cmd::assert::AssertResult;
 use libloading::Library;
 use serde::{Deserialize, Serialize};
-use voicevox_core::SupportedDevices;
 
 use test_util::{c_api::CApi, EXAMPLE_DATA};
@@ -33,7 +33,9 @@ impl assert_cdylib::TestCase for TestCase {
         {
             let supported_devices = lib.supported_devices();
-            serde_json::from_str::<SupportedDevices>(CStr::from_ptr(supported_devices).to_str()?)?;
+            serde_json::from_str::<HashMap<String, bool>>(
+                CStr::from_ptr(supported_devices).to_str()?,
+            )?;
         }
 
         assert!(lib.initialize(false, 0, false));
diff --git a/crates/voicevox_core_c_api/tests/e2e/testcases/global_info.rs b/crates/voicevox_core_c_api/tests/e2e/testcases/global_info.rs
index c4dd21c32..d1a9e7763 100644
--- a/crates/voicevox_core_c_api/tests/e2e/testcases/global_info.rs
+++ b/crates/voicevox_core_c_api/tests/e2e/testcases/global_info.rs
@@ -6,7 +6,6 @@
 use libloading::Library;
 use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, DisplayFromStr};
 use test_util::c_api::{self, CApi, VoicevoxLoadOnnxruntimeOptions, VoicevoxResultCode};
-use voicevox_core::SupportedDevices;
 
 use crate::{
     assert_cdylib::{self, case, Utf8Output},
@@ -65,7 +64,9 @@ impl assert_cdylib::TestCase for TestCase {
                 supported_devices.as_mut_ptr(),
             ));
             let supported_devices = supported_devices.assume_init();
-            serde_json::from_str::<SupportedDevices>(CStr::from_ptr(supported_devices).to_str()?)?;
+            serde_json::from_str::<HashMap<String, bool>>(
+                CStr::from_ptr(supported_devices).to_str()?,
+            )?;
             lib.voicevox_json_free(supported_devices);
         }
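With `Deserialize` gone, `SupportedDevices` still serializes (it keeps `Serialize` and `to_json`), but its JSON can no longer be parsed back into the struct, which is why the two e2e tests above now read it as a plain map. A minimal sketch of that consumption pattern, assuming only `serde_json`; the `read_supported_devices` helper is hypothetical and not part of this patch:

```rust
use std::collections::HashMap;

// Hypothetical downstream sketch: the JSON produced by `SupportedDevices` is
// still readable, but only as plain data (a string-to-bool map here), never as
// `SupportedDevices` itself.
fn read_supported_devices(json: &str) -> serde_json::Result<HashMap<String, bool>> {
    // `serde_json::from_str::<SupportedDevices>(json)` no longer compiles once
    // the derive is removed; a map keeps working even if new fields appear.
    serde_json::from_str(json)
}

fn main() -> serde_json::Result<()> {
    let devices = read_supported_devices(r#"{"cpu":true,"cuda":false,"dml":false}"#)?;
    assert_eq!(devices.get("cpu"), Some(&true));
    Ok(())
}
```

Reading into a map keeps compiling even if `SupportedDevices` later gains a field, which appears to be the compatibility concern behind dropping `Deserialize`.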
diff --git a/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/GlobalInfo.java b/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/GlobalInfo.java
index e8214a480..562b3fcfe 100644
--- a/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/GlobalInfo.java
+++ b/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/GlobalInfo.java
@@ -34,6 +34,8 @@ public static String getVersion() {
    *
    * <p>あくまでONNX Runtimeが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても {@link #cuda} や {@link #dml} は
    * {@code true} を示しうる。
+   *
+   * <p>{@code Gson#fromJson} によりJSONから変換することはできない。その試みは {@link UnsupportedOperationException} となる。
    */
   public static class SupportedDevices {
     /**
@@ -71,9 +73,14 @@ public static class SupportedDevices {
     public final boolean dml;
 
     private SupportedDevices() {
-      this.cpu = false;
-      this.cuda = false;
-      this.dml = false;
+      throw new UnsupportedOperationException("You cannot deserialize `SupportedDevices`");
+    }
+
+    /** accessed only via JNI */
+    private SupportedDevices(boolean cpu, boolean cuda, boolean dml) {
+      this.cpu = cpu;
+      this.cuda = cuda;
+      this.dml = dml;
+    }
   }
 }
diff --git a/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/blocking/Onnxruntime.java b/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/blocking/Onnxruntime.java
index 248ae141c..6f44cdefb 100644
--- a/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/blocking/Onnxruntime.java
+++ b/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/blocking/Onnxruntime.java
@@ -2,7 +2,6 @@
 
 import static jp.hiroshiba.voicevoxcore.GlobalInfo.SupportedDevices;
 
-import com.google.gson.Gson;
 import jakarta.annotation.Nonnull;
 import jakarta.annotation.Nullable;
 import java.util.Optional;
@@ -122,16 +121,10 @@ private Onnxruntime(@Nullable String filename) {
    * @return {@link SupportedDevices}。
    */
   public SupportedDevices supportedDevices() {
-    Gson gson = new Gson();
-    String supportedDevicesJson = rsSupportedDevices();
-    SupportedDevices supportedDevices = gson.fromJson(supportedDevicesJson, SupportedDevices.class);
-    if (supportedDevices == null) {
-      throw new NullPointerException("supported_devices");
-    }
-    return supportedDevices;
+    return rsSupportedDevices();
   }
 
   private native void rsNew(@Nullable String filename);
 
-  private native String rsSupportedDevices();
+  private native SupportedDevices rsSupportedDevices();
 }
diff --git a/crates/voicevox_core_java_api/src/onnxruntime.rs b/crates/voicevox_core_java_api/src/onnxruntime.rs
index 546ceba56..45ad29889 100644
--- a/crates/voicevox_core_java_api/src/onnxruntime.rs
+++ b/crates/voicevox_core_java_api/src/onnxruntime.rs
@@ -7,7 +7,7 @@ use jni::{
     JNIEnv,
 };
 
-use crate::common::throw_if_err;
+use crate::{common::throw_if_err, object};
 
 // SAFETY: voicevox_core_java_apiを構成するライブラリの中に、これと同名のシンボルは存在しない
 #[duplicate_item(
@@ -54,8 +54,18 @@ unsafe extern "system" fn Java_jp_hiroshiba_voicevoxcore_blocking_Onnxruntime_rs
         let this = *env.get_rust_field::<_, _, &'static voicevox_core::blocking::Onnxruntime>(
            &this, "handle",
        )?;
-        let json = this.supported_devices()?.to_json().to_string();
-        let json = env.new_string(json)?;
-        Ok(json.into_raw())
+        let devices = this.supported_devices()?;
+
+        assert!(match devices.to_json() {
+            serde_json::Value::Object(o) => o.len() == 3, // `cpu`, `cuda`, `dml`
+            _ => false,
+        });
+
+        let obj = env.new_object(
+            object!("GlobalInfo$SupportedDevices"),
+            "(ZZZ)V",
+            &[devices.cpu.into(), devices.cuda.into(), devices.dml.into()],
+        )?;
+        Ok(obj.into_raw())
     })
 }
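The JNI side now builds the Java object directly through the `(ZZZ)V` constructor instead of handing Java a JSON string to run through Gson. A minimal sketch of that pattern, assuming the `jni` crate with a 0.21-style API; `Devices` and `to_java_object` are illustrative stand-ins, and the real patch resolves the class path via its own `object!` macro:

```rust
use jni::{sys::jobject, JNIEnv};

// Stand-in for the real `SupportedDevices`; illustrative only.
struct Devices {
    cpu: bool,
    cuda: bool,
    dml: bool,
}

fn to_java_object(env: &mut JNIEnv<'_>, d: &Devices) -> jni::errors::Result<jobject> {
    // Call the package-private (boolean, boolean, boolean) constructor -- JVM
    // signature "(ZZZ)V" -- so Java never has to parse JSON.
    let obj = env.new_object(
        "jp/hiroshiba/voicevoxcore/GlobalInfo$SupportedDevices",
        "(ZZZ)V",
        &[d.cpu.into(), d.cuda.into(), d.dml.into()],
    )?;
    Ok(obj.into_raw())
}
```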
diff --git a/crates/voicevox_core_python_api/python/voicevox_core/_models/__init__.py b/crates/voicevox_core_python_api/python/voicevox_core/_models/__init__.py
index 788d5e98b..62611636e 100644
--- a/crates/voicevox_core_python_api/python/voicevox_core/_models/__init__.py
+++ b/crates/voicevox_core_python_api/python/voicevox_core/_models/__init__.py
@@ -1,8 +1,9 @@
 import dataclasses
-from typing import Literal, NewType, TypeAlias
+from typing import Literal, NewType, NoReturn, TypeAlias
 from uuid import UUID
 
 import pydantic
+from pydantic_core import ArgsKwargs
 
 from .._rust import _to_zenkaku, _validate_pronunciation
 from ._please_do_not_use import _Reserved
@@ -137,6 +138,9 @@ class SupportedDevices:
 
     あくまでONNX Runtimeが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても
     ``cuda`` や ``dml`` は ``True`` を示しうる。
+
+    JSONからの変換も含め、VOICEVOX CORE以外が作ることはできない。作ろうとした場合
+    ``TypeError`` となる。
     """
 
     cpu: bool
@@ -162,6 +166,13 @@ class SupportedDevices:
         (``DmlExecutionProvider``)に対応する。必要な環境についてはそちらを参照。
         """
 
+    @pydantic.model_validator(mode="before")
+    @staticmethod
+    def _deny_unless_from_pyo3(data: ArgsKwargs) -> ArgsKwargs:
+        if "I AM FROM PYO3" not in data.args:
+            raise TypeError("You cannot deserialize `SupportedDevices`")
+        return ArgsKwargs((), kwargs=data.kwargs)
+
 
 AccelerationMode: TypeAlias = Literal["AUTO", "CPU", "GPU"] | _Reserved
 """
diff --git a/crates/voicevox_core_python_api/src/convert.rs b/crates/voicevox_core_python_api/src/convert.rs
index 6a1cbe8ab..a08d973f7 100644
--- a/crates/voicevox_core_python_api/src/convert.rs
+++ b/crates/voicevox_core_python_api/src/convert.rs
@@ -10,7 +10,7 @@ use pyo3::{
 use serde::{de::DeserializeOwned, Serialize};
 use serde_json::json;
 use uuid::Uuid;
-use voicevox_core::{AccelerationMode, AccentPhrase, StyleId, VoiceModelMeta};
+use voicevox_core::{AccelerationMode, AccentPhrase, StyleId, SupportedDevices, VoiceModelMeta};
 
 use crate::{
     AnalyzeTextError, GetSupportedDevicesError, GpuSupportError, InitInferenceRuntimeError,
@@ -255,6 +255,25 @@ pub(crate) impl voicevox_core::Result {
     }
 }
 
+#[ext(SupportedDevicesExt)]
+impl SupportedDevices {
+    pub(crate) fn to_py(self, py: Python<'_>) -> PyResult<&PyAny> {
+        let class = py
+            .import("voicevox_core")?
+            .getattr("SupportedDevices")?
+            .downcast()?;
+        assert!(match self.to_json() {
+            serde_json::Value::Object(o) => o.len() == 3, // `cpu`, `cuda`, `dml`
+            _ => false,
+        });
+        PyAny::call(
+            class,
+            ("I AM FROM PYO3",),
+            Some([("cpu", self.cpu), ("cuda", self.cuda), ("dml", self.dml)].into_py_dict(py)),
+        )
+    }
+}
+
 #[ext]
 impl std::result::Result {
     fn into_py_value_result(self) -> PyResult {
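Both the JNI and PyO3 conversions copy `cpu`, `cuda` and `dml` by hand, so each one asserts that the serialized form still has exactly three fields: if `SupportedDevices` ever gains a field, the assertion fails loudly instead of the new field being dropped silently. A standalone sketch of that guard, assuming `serde_json`; the helper name is illustrative, not part of the patch:

```rust
// Field-count guard: fail fast when the hand-written field copy falls behind
// the struct's JSON representation.
fn assert_field_count(json: &serde_json::Value, expected: usize) {
    match json {
        serde_json::Value::Object(o) => assert_eq!(o.len(), expected),
        _ => panic!("expected a JSON object"),
    }
}

fn main() {
    let json = serde_json::json!({"cpu": true, "cuda": false, "dml": false});
    assert_field_count(&json, 3); // `cpu`, `cuda`, `dml`
}
```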
diff --git a/crates/voicevox_core_python_api/src/lib.rs b/crates/voicevox_core_python_api/src/lib.rs
index bc5114cea..2472a5f98 100644
--- a/crates/voicevox_core_python_api/src/lib.rs
+++ b/crates/voicevox_core_python_api/src/lib.rs
@@ -303,7 +303,8 @@ mod blocking {
     use voicevox_core::{AccelerationMode, AudioQuery, StyleId, UserDictWord};
 
     use crate::{
-        convert::VoicevoxCoreResultExt as _, Closable, SingleTasked, VoiceModelFilePyFields,
+        convert::{SupportedDevicesExt as _, VoicevoxCoreResultExt as _},
+        Closable, SingleTasked, VoiceModelFilePyFields,
     };
 
     #[pyclass]
@@ -415,12 +416,7 @@ mod blocking {
         }
 
         fn supported_devices<'py>(&self, py: Python<'py>) -> PyResult<&'py PyAny> {
-            let class = py
-                .import("voicevox_core")?
-                .getattr("SupportedDevices")?
-                .downcast()?;
-            let s = self.0.supported_devices().into_py_result(py)?;
-            crate::convert::to_pydantic_dataclass(s, class)
+            self.0.supported_devices().into_py_result(py)?.to_py(py)
         }
     }
 
@@ -888,7 +884,10 @@ mod asyncio {
     use uuid::Uuid;
     use voicevox_core::{AccelerationMode, AudioQuery, StyleId, UserDictWord};
 
-    use crate::{convert::VoicevoxCoreResultExt as _, Closable, Tokio, VoiceModelFilePyFields};
+    use crate::{
+        convert::{SupportedDevicesExt as _, VoicevoxCoreResultExt as _},
+        Closable, Tokio, VoiceModelFilePyFields,
+    };
 
     #[pyclass]
     #[derive(Clone)]
@@ -1017,12 +1016,7 @@ mod asyncio {
         }
 
         fn supported_devices<'py>(&self, py: Python<'py>) -> PyResult<&'py PyAny> {
-            let class = py
-                .import("voicevox_core")?
-                .getattr("SupportedDevices")?
-                .downcast()?;
-            let s = self.0.supported_devices().into_py_result(py)?;
-            crate::convert::to_pydantic_dataclass(s, class)
+            self.0.supported_devices().into_py_result(py)?.to_py(py)
         }
     }
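The new `SupportedDevicesExt as _` imports work because `#[ext(SupportedDevicesExt)]` turns the inherent-looking `to_py` into an extension-trait method: call sites need the trait in scope but never refer to it by name. A rough sketch of what such an attribute expands to, assuming an `easy_ext`-style macro and a tuple as a stand-in for `SupportedDevices`:

```rust
mod convert {
    // Roughly what `#[ext(SupportedDevicesExt)]` generates: a trait plus an impl.
    pub trait SupportedDevicesExt {
        fn describe(&self) -> String;
    }

    // Stand-in target type; illustrative only.
    impl SupportedDevicesExt for (bool, bool, bool) {
        fn describe(&self) -> String {
            format!("cpu={} cuda={} dml={}", self.0, self.1, self.2)
        }
    }
}

// The trait only needs to be in scope; `as _` keeps its name out of the namespace.
use convert::SupportedDevicesExt as _;

fn main() {
    println!("{}", (true, false, false).describe());
}
```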