From 6474e75c4a4264ccc9eab38489bf1930dc095ec7 Mon Sep 17 00:00:00 2001
From: michalkulakowski
Date: Thu, 16 Jan 2025 08:35:58 +0100
Subject: [PATCH] Add missing llm test

---
 src/BUILD                             |  4 +++-
 src/test/http_openai_handler_test.cpp | 23 +++++++++++++++++++++++
 src/test/llmnode_test.cpp             | 18 ++++++++++++++++++
 3 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/src/BUILD b/src/BUILD
index fbf1916903..8b537a0bf1 100644
--- a/src/BUILD
+++ b/src/BUILD
@@ -1893,7 +1893,9 @@ cc_test(
             "test/mediapipe_framework_test.cpp",
             "test/http_openai_handler_test.cpp",
         ],
-        "//:disable_mediapipe" : [],
+        "//:disable_mediapipe" : [
+            "test/mediapipe_disabled_test.cpp",
+        ],
     }) + select({
         "//:not_disable_python": [
             # OvmsPyTensor is currently not used in OVMS core and is just a base for the binding.
diff --git a/src/test/http_openai_handler_test.cpp b/src/test/http_openai_handler_test.cpp
index 289219722f..b7e66182b5 100644
--- a/src/test/http_openai_handler_test.cpp
+++ b/src/test/http_openai_handler_test.cpp
@@ -482,3 +482,26 @@ TEST_F(HttpOpenAIHandlerParsingTest, ParsingMessagesEmptyContentArrayFails) {
     std::shared_ptr<ovms::OpenAIChatCompletionsHandler> apiHandler = std::make_shared<ovms::OpenAIChatCompletionsHandler>(doc, ovms::Endpoint::CHAT_COMPLETIONS, std::chrono::system_clock::now(), *tokenizer);
     EXPECT_EQ(apiHandler->parseMessages(), absl::InvalidArgumentError("Invalid message structure - content array is empty"));
 }
+
+TEST_F(HttpOpenAIHandlerTest, V3ApiWithNonLLMCalculator) {
+    handler.reset();
+    server.setShutdownRequest(1);
+    t->join();
+    server.setShutdownRequest(0);
+    SetUpServer(getGenericFullPathForSrcTest("/ovms/src/test/mediapipe/config_mediapipe_dummy_kfs.json").c_str());
+    ASSERT_EQ(handler->parseRequestComponents(comp, "POST", endpoint, headers), ovms::StatusCode::OK);
+    std::string requestBody = R"(
+        {
+            "model": "mediapipeDummyKFS",
+            "stream": false,
+            "messages": []
+        }
+    )";
+
+    EXPECT_CALL(*writer, PartialReplyEnd()).Times(0);
+    EXPECT_CALL(*writer, PartialReply(::testing::_)).Times(0);
+    EXPECT_CALL(*writer, IsDisconnected()).Times(0);
+
+    auto status = handler->dispatchToProcessor("/v3/completions", requestBody, &response, comp, responseComponents, writer);
+    ASSERT_EQ(status, ovms::StatusCode::MEDIAPIPE_GRAPH_ADD_PACKET_INPUT_STREAM);
+}
diff --git a/src/test/llmnode_test.cpp b/src/test/llmnode_test.cpp
index 1d0e3fba1b..f1f29ad08f 100644
--- a/src/test/llmnode_test.cpp
+++ b/src/test/llmnode_test.cpp
@@ -589,6 +589,24 @@ TEST_F(LLMFlowHttpTest, unaryChatCompletionsJsonN) {
     EXPECT_STREQ(parsedResponse["object"].GetString(), "chat.completion");
 }
 
+TEST_F(LLMFlowHttpTest, KFSApiRequestToChatCompletionsGraph) {
+    std::string requestBody = R"({
+        "inputs" : [
+            {
+                "name" : "input",
+                "shape" : [ 2, 2 ],
+                "datatype" : "UINT32",
+                "data" : [ 1, 2, 3, 4 ]
+            }
+        ]
+    })";
+    std::vector<std::pair<std::string, std::string>> headers;
+    ASSERT_EQ(handler->parseRequestComponents(comp, "POST", "/v2/models/llmDummyKFS/versions/1/infer", headers), ovms::StatusCode::OK);
+    ASSERT_EQ(
+        handler->dispatchToProcessor(endpointChatCompletions, requestBody, &response, comp, responseComponents, writer),
+        ovms::StatusCode::MEDIAPIPE_GRAPH_ADD_PACKET_INPUT_STREAM);
+}
+
 TEST_F(LLMFlowHttpTest, unaryChatCompletionsJson) {
     std::string requestBody = R"(
     {