diff --git a/search-processors/src/main/java/org/opensearch/searchpipelines/questionanswering/generative/GenerativeQAResponseProcessor.java b/search-processors/src/main/java/org/opensearch/searchpipelines/questionanswering/generative/GenerativeQAResponseProcessor.java
index 2b01906e28..e3e86320dd 100644
--- a/search-processors/src/main/java/org/opensearch/searchpipelines/questionanswering/generative/GenerativeQAResponseProcessor.java
+++ b/search-processors/src/main/java/org/opensearch/searchpipelines/questionanswering/generative/GenerativeQAResponseProcessor.java
@@ -120,7 +120,6 @@ public SearchResponse processResponse(SearchRequest request, SearchResponse resp
         if (timeout == null || timeout == GenerativeQAParameters.SIZE_NULL_VALUE) {
             timeout = DEFAULT_PROCESSOR_TIME_IN_SECONDS;
         }
-        log.info("Timeout for this request: {} seconds.", timeout);
 
         String llmQuestion = params.getLlmQuestion();
         String llmModel = params.getLlmModel() == null ? this.llmModel : params.getLlmModel();
@@ -129,17 +128,14 @@ public SearchResponse processResponse(SearchRequest request, SearchResponse resp
         }
         String conversationId = params.getConversationId();
-        log.info("LLM model {}, conversation id: {}", llmModel, conversationId);
 
         Instant start = Instant.now();
         Integer interactionSize = params.getInteractionSize();
         if (interactionSize == null || interactionSize == GenerativeQAParameters.SIZE_NULL_VALUE) {
             interactionSize = DEFAULT_CHAT_HISTORY_WINDOW;
         }
-        log.info("Using interaction size of {}", interactionSize);
         List<Interaction> chatHistory = (conversationId == null)
             ? Collections.emptyList()
             : memoryClient.getInteractions(conversationId, interactionSize);
-        log.info("Retrieved chat history. ({})", getDuration(start));
 
         Integer topN = params.getContextSize();
         if (topN == null) {
@@ -147,8 +143,6 @@ public SearchResponse processResponse(SearchRequest request, SearchResponse resp
         }
         List<String> searchResults = getSearchResults(response, topN);
-        log.info("system_prompt: {}", systemPrompt);
-        log.info("user_instructions: {}", userInstructions);
 
         start = Instant.now();
         try {
             ChatCompletionOutput output = llm
@@ -314,15 +308,6 @@ public SearchResponseProcessor create(
                 config,
                 GenerativeQAProcessorConstants.CONFIG_NAME_USER_INSTRUCTIONS
             );
-            log
-                .info(
-                    "model_id {}, llm_model {}, context_field_list {}, system_prompt {}, user_instructions {}",
-                    modelId,
-                    llmModel,
-                    contextFields,
-                    systemPrompt,
-                    userInstructions
-                );
             return new GenerativeQAResponseProcessor(
                 client,
                 tag,
diff --git a/search-processors/src/main/java/org/opensearch/searchpipelines/questionanswering/generative/client/ConversationalMemoryClient.java b/search-processors/src/main/java/org/opensearch/searchpipelines/questionanswering/generative/client/ConversationalMemoryClient.java
index cb94b75748..5db677fe65 100644
--- a/search-processors/src/main/java/org/opensearch/searchpipelines/questionanswering/generative/client/ConversationalMemoryClient.java
+++ b/search-processors/src/main/java/org/opensearch/searchpipelines/questionanswering/generative/client/ConversationalMemoryClient.java
@@ -102,14 +102,12 @@ public List<Interaction> getInteractions(String conversationId, int lastN) {
                 interactions.addAll(list);
                 from += list.size();
                 maxResults -= list.size();
-                log.info("Interactions: {}, from: {}, maxResults: {}", interactions, from, maxResults);
             } else if (response.hasMorePages()) {
                 // If we didn't get any results back, we ignore this flag and break out of the loop
                 // to avoid an infinite loop.
                 // But in the future, we may support this mode, e.g. DynamoDB.
                 break;
             }
-            log.info("Interactions: {}, from: {}, maxResults: {}", interactions, from, maxResults);
            allInteractionsFetched = !response.hasMorePages();
        } while (from < lastN && !allInteractionsFetched);
 