diff --git a/config/openai.json b/config/openai.json index 7ca9729..37a5879 100644 --- a/config/openai.json +++ b/config/openai.json @@ -4,7 +4,8 @@ { "action": "select", "description": "If the user likes or selects any item, this action should be used." }, { "action": "init", "description": "If the user wants to place an order after search and select and has shared the billing details." }, { "action": "confirm", "description": "Confirm an order. This action gets called when users confirms an order." }, - { "action": "clear", "description": "If the user wants to clear the session or restart session or chat." } + { "action": "clear_chat", "description": "If the user wants to clear the session or restart session or chat." }, + { "action": "clear_all", "description": "If the user wants to clear the complete session or the profile." } ], "SCHEMA_TRANSLATION_CONTEXT": [ { "role": "system", "content": "Your job is to identify the endpoint, method and request body from the given schema, based on the last user input and return the extracted details in the following JSON structure : \n\n {'url':'', 'method':'', 'body':''}'"}, diff --git a/controllers/Bot.js b/controllers/Bot.js index a197892..745098c 100644 --- a/controllers/Bot.js +++ b/controllers/Bot.js @@ -86,6 +86,9 @@ async function process_text(req, res) { }; const EMPTY_SESSION = { + profile:{ + misc: {} + }, sessionId: sender, text : [], actions : { @@ -104,6 +107,21 @@ async function process_text(req, res) { } try{ + + // Get profile + const profileResponse = await ai.get_profile_from_text(message, session.profile); + if(profileResponse.status){ + session.profile = { + ...session.profile, + ...profileResponse.data, + misc: { + ...session.profile.misc, + ...profileResponse.data.misc + } + }; + } + + // get action ai.action = await ai.get_beckn_action_from_text(message, session.actions.formatted); // Reset actions context if action is search @@ -112,10 +130,17 @@ async function process_text(req, res) { } - 
if(ai.action?.action === 'clear'){ - session = EMPTY_SESSION; + if(ai.action?.action === 'clear_chat'){ + session = { + ...EMPTY_SESSION, + profile: session.profile + }; response.formatted = 'Session cleared! You can start a new session now.'; } + else if(ai.action?.action === 'clear_all'){ + session = EMPTY_SESSION; + response.formatted = 'Session & profile cleared! You can start a new session now.'; + } else if(ai.action?.action == null) { // get ai response response.formatted = await ai.get_ai_response_to_query(message, session.text); @@ -126,7 +151,7 @@ async function process_text(req, res) { session.text.push({ role: 'assistant', content: response.formatted }); } else{ - response = await process_action(ai.action, message, session.actions, sender); + response = await process_action(ai.action, message, session, sender); // update actions if(response.formatted && response.raw){ @@ -161,10 +186,10 @@ async function process_text(req, res) { * Can be reused by gpt bots if required * @param {*} action * @param {*} text - * @param {*} actions_context + * @param {*} session * @returns */ -async function process_action(action, text, actions_context, sender=null){ +async function process_action(action, text, session, sender=null){ let ai = new AI(); let response = { raw: null, @@ -179,11 +204,11 @@ async function process_action(action, text, actions_context, sender=null){ const schema = await ai.get_schema_by_action(action.action); // Get config - const beckn_context = await ai.get_context_by_instruction(text, actions_context.raw); + const beckn_context = await ai.get_context_by_instruction(text, session.actions.raw); // Prepare request if(schema && beckn_context){ - const request = await ai.get_beckn_request_from_text(text, actions_context.raw, beckn_context, schema); + const request = await ai.get_beckn_request_from_text(text, session.actions.raw, beckn_context, schema); if(request.status){ // call api @@ -196,7 +221,8 @@ async function process_action(action, text, 
actions_context, sender=null){ response.raw = request.data.body.context.action==='search' ? await ai.compress_search_results(api_response.data) : api_response.data const formatted_response = await ai.get_text_from_json( api_response.data, - [...actions_context.formatted, { role: 'user', content: text }] + [...session.actions.formatted, { role: 'user', content: text }], + session.profile ); response.formatted = formatted_response.message; } diff --git a/server.js b/server.js index e3f73a4..3d8a6cd 100644 --- a/server.js +++ b/server.js @@ -5,7 +5,7 @@ import express from 'express' import bodyParser from 'body-parser' import logger from './utils/logger.js' import messageController from './controllers/Bot.js' -import DBService from './services/DBService.js' +// import DBService from './services/DBService.js' import { cancelBooking, updateCatalog, @@ -21,13 +21,15 @@ app.use(bodyParser.json()) // Define endpoints here // app.post('/act', actions.act) -app.post('/webhook', messageController.process_wa_webhook) +app.post('/webhook', messageController.process_text) app.post('/notify', notify) app.post('/cancel-booking', cancelBooking) app.post('/update-catalog', updateCatalog) + + // Reset all sessions -const db = new DBService() -await db.clear_all_sessions() +// const db = new DBService() +// await db.clear_all_sessions() // Start the Express server app.listen(process.env.SERVER_PORT, () => { diff --git a/services/AI.js b/services/AI.js index a3b34c4..7ac25e9 100644 --- a/services/AI.js +++ b/services/AI.js @@ -161,7 +161,7 @@ class AI { * @param {*} schema * @returns */ - async get_beckn_request_from_text(instruction, context=[], beckn_context={}, schema={}){ + async get_beckn_request_from_text(instruction, context=[], beckn_context={}, schema={}, profile={}){ logger.info(`Getting beckn request from instruction : ${instruction}`) let action_response = { @@ -173,6 +173,7 @@ class AI { let openai_messages = [ { "role": "system", "content": `Schema definition: 
${JSON.stringify(schema)}` }, ...openai_config.SCHEMA_TRANSLATION_CONTEXT, + {"role": "system", "content": `This is the user profile that you can use for transactions : ${JSON.stringify(profile)}`}, {"role": "system", "content": `Following is the conversation history`}, ...context, { "role": "user", "content": instruction } @@ -249,26 +250,27 @@ class AI { } - async get_text_from_json(json_response, context=[], model = process.env.OPENAI_MODEL_ID) { + async get_text_from_json(json_response, context=[], profile={}) { const desired_output = { status: true, message: "" }; const openai_messages = [ {role: 'system', content: `Your job is to analyse the given json object and provided chat history to convert the json response into a human readable, less verbose, whatsapp friendly message and return this in a json format as given below: \n ${JSON.stringify(desired_output)}. If the json is invalid or empty, the status in desired output should be false with the relevant error message.`}, - {role: 'system', content: `User can select an item after seeing the search results or directly 'init' by selecting an item and sharing their billing details. You should ask user what they want to do next.`}, - {role: 'system', content: `If its a 'select' response, do ask for billing details to initiate the order.`}, + {role: 'system', content: `User can select an item after seeing the search results. You should ask user what they want to do next.`}, + {role: 'system', content: `If its a 'select' response you should ask if the user wants to place the order. 
If the user profile does not have billing details such as name, phone, email, you should also ask the user to share the billing details to place the order.`}, {role: 'system', content: `If its an 'init' response, you should ask for confirmation.`}, {role: 'system', content: `If its a 'confirm' response, you should include the order id in your response.`}, {role: 'system', content: `You should show search results in a listing format with important details mentioned such as name, price, rating, location, description or summary etc. and a call to action to select the item. `}, {role: 'system', content: `If the given json looks like an error, summarize teh error but for humans, do not include any code or technical details. Produce some user friendly fun messages.`}, - ...context.filter(c => c.role === 'user'), + {role: 'system', content: `User profile : ${JSON.stringify(profile)}`}, + ...context, {role: 'assistant',content: `${JSON.stringify(json_response)}`}, ] try { const completion = await openai.chat.completions.create({ messages: openai_messages, - model: model, + model: process.env.OPENAI_MODEL_ID, temperature: 0, response_format: { type: 'json_object' }, }) @@ -282,7 +284,46 @@ class AI { } } - } + } + + async get_profile_from_text(message, profile={}){ + const desired_output = { + "name": "", + "email": "", + "phone": "", + "address": "", + "gender": "", + "age" : "" + } + + const openai_messages = [ + { role: 'system', content: `Please analyse the given user message and extract profile information about the user which is not already part of their profile. The desired output format should be the following json ${JSON.stringify(desired_output)}` }, + { role: 'system', content: `You must not send any vague or incomplete information or anything that does not tell something about the user profile.` }, + { role: 'system', content: `Any profile information that does not match the desired output should be sent under a key 'misc'. 
You are not always required to return a response, return empty json if no profile information extracted.` }, + { role: 'system', content: `Existing profile : ${JSON.stringify(profile)}`}, + { role: 'user', content: message } + ] + + try { + const completion = await openai.chat.completions.create({ + messages: openai_messages, + model: process.env.OPENAI_MODEL_ID, + temperature: 0, + response_format: { type: 'json_object' }, + }) + let response = JSON.parse(completion.choices[0].message.content) + return { + status: true, + data: response + }; + } catch (e) { + logger.error(e) + return { + status:false, + message:e.message + } + } + } } export default AI; \ No newline at end of file diff --git a/tests/data/api_responses/on_confirm.json b/tests/data/api_responses/on_confirm.json index 8f55e1f..3c441ad 100644 --- a/tests/data/api_responses/on_confirm.json +++ b/tests/data/api_responses/on_confirm.json @@ -15,7 +15,8 @@ } }, "bpp_id": "mit-ps-energy.becknprotocol.io", - "bpp_uri": "https://mit-ps-energy.becknprotocol.io" + "bpp_uri": "https://mit-ps-energy.becknprotocol.io", + "base_url": "https://mit-ps-bap-client.becknprotocol.io" }, "responses": [ { diff --git a/tests/data/api_responses/on_init.json b/tests/data/api_responses/on_init.json index 26dd2a3..14fb88c 100644 --- a/tests/data/api_responses/on_init.json +++ b/tests/data/api_responses/on_init.json @@ -10,7 +10,8 @@ "bap_id": "mit-ps-bap.becknprotocol.io", "bap_uri": "https://mit-ps-bap.becknprotocol.io", "bpp_id": "mit-ps-energy.becknprotocol.io", - "bpp_uri": "http://mit-ps-energy.becknprotocol.io" + "bpp_uri": "http://mit-ps-energy.becknprotocol.io", + "base_url": "https://mit-ps-bap-client.becknprotocol.io" }, "responses": [ { diff --git a/tests/data/api_responses/on_search.json b/tests/data/api_responses/on_search.json index 23ae872..0bc9460 100644 --- a/tests/data/api_responses/on_search.json +++ b/tests/data/api_responses/on_search.json @@ -10,7 +10,8 @@ "bap_id": "mit-ps-bap.becknprotocol.io", 
"bap_uri": "https://mit-ps-bap.becknprotocol.io", "bpp_id": "mit-ps-energy.becknprotocol.io", - "bpp_uri": "http://mit-ps-energy.becknprotocol.io" + "bpp_uri": "http://mit-ps-energy.becknprotocol.io", + "base_url": "https://mit-ps-bap-client.becknprotocol.io" }, "responses": [ { diff --git a/tests/data/api_responses/on_search_compressed.json b/tests/data/api_responses/on_search_compressed.json index e700f15..f160a68 100644 --- a/tests/data/api_responses/on_search_compressed.json +++ b/tests/data/api_responses/on_search_compressed.json @@ -10,7 +10,8 @@ "bap_id": "mit-ps-bap.becknprotocol.io", "bap_uri": "https://mit-ps-bap.becknprotocol.io", "bpp_id": "mit-ps-energy.becknprotocol.io", - "bpp_uri": "http://mit-ps-energy.becknprotocol.io" + "bpp_uri": "http://mit-ps-energy.becknprotocol.io", + "base_url": "https://mit-ps-bap-client.becknprotocol.io" }, "responses": { "providers": [ diff --git a/tests/data/api_responses/on_select.json b/tests/data/api_responses/on_select.json index 9b6af4f..630d3d1 100644 --- a/tests/data/api_responses/on_select.json +++ b/tests/data/api_responses/on_select.json @@ -10,7 +10,8 @@ "bap_id": "mit-ps-bap.becknprotocol.io", "bap_uri": "https://mit-ps-bap.becknprotocol.io", "bpp_id": "mit-ps-energy.becknprotocol.io", - "bpp_uri": "http://mit-ps-energy.becknprotocol.io" + "bpp_uri": "http://mit-ps-energy.becknprotocol.io", + "base_url": "https://mit-ps-bap-client.becknprotocol.io" }, "responses": [ { diff --git a/tests/unit/services/ai.test.js b/tests/unit/services/ai.test.js index b2532cb..ee470ba 100644 --- a/tests/unit/services/ai.test.js +++ b/tests/unit/services/ai.test.js @@ -86,10 +86,16 @@ describe('Test cases for services/ai/get_beckn_action_from_text()', () => { expect(response.action).to.equal('search'); }); - it('Should return `clear` action when user wishes to clear the chat', async () => { + it('Should return `clear_chat` action when user wishes to clear the chat', async () => { const response = await 
ai.get_beckn_action_from_text('Can you clear this session ', hotel_session.data.actions); expect(response).to.have.property('action') - expect(response.action).to.equal('clear'); + expect(response.action).to.equal('clear_chat'); + }); + + it('Should return `clear_all` action when user wishes to clear the entire session including profile.', async () => { + const response = await ai.get_beckn_action_from_text('Can you clear this session along with my profile.', hotel_session.data.actions); + expect(response).to.have.property('action') + expect(response.action).to.equal('clear_all'); + }); }) @@ -128,7 +134,7 @@ describe('Test cases for get_schema_by_action() function', () => { expect(response).to.be.false; }) - it('Should return false if inavlid action found', async () => { + it('Should return false if invalid action found', async () => { ai.action = {action: 'invalid'}; const response = await ai.get_schema_by_action(`I'm looking for some hotels`); expect(response).to.be.false; @@ -241,6 +247,31 @@ describe('Test cases for services/ai/get_beckn_request_from_text()', () => { expect(response.data.body.message.order.billing).to.have.property('phone') }); + + it('Should test get_beckn_request_from_text() successfully for a `init` if billing details shared earlier', async () => { + let context = [ + {"role": "user", "content": "I'm looking for some ev chargers"}, + {"role": "assistant", "content": JSON.stringify(on_search_compressed)}, + {"role": "user", "content": "I want to select the first item"}, + {"role": "assistant", "content": JSON.stringify(on_select)} + ] + + const profile = { + "name": "John Doe", + "email": "john.doe@example.com", + "phone": "9999999999" + } + ai.action = {action: 'init'}; + const schema = await ai.get_schema_by_action(); + + const response = await ai.get_beckn_request_from_text("Lets place the order", context, on_init.context, schema, profile); + expect(response.data.body.message.order.billing).to.have.property('name') + 
expect(response.data.body.message.order.billing.name).to.eq(profile.name); + expect(response.data.body.message.order.billing).to.have.property('email') + expect(response.data.body.message.order.billing.email).to.eq(profile.email); + expect(response.data.body.message.order.billing).to.have.property('phone') + expect(response.data.body.message.order.billing.phone).to.eq(profile.phone); + }); it('Should test get_beckn_request_from_text() succesfully for a `confirm`', async () => { let context = [ @@ -283,3 +314,28 @@ describe('Test cases for services/ai/get_text_from_json()', () => { }) }) + +describe('Test cases for get_profile_from_text', () => { + it('Should return an object with billing details if billing details shared', async ()=> { + const response = await ai.get_profile_from_text('John Doe, 9999999999, john.doe@example.com'); + expect(response.status).to.be.true; + expect(response.data).to.have.property('name'); + expect(response.data.name).to.eq('John Doe'); + expect(response.data).to.have.property('phone'); + expect(response.data.phone).to.eq('9999999999'); + expect(response.data).to.have.property('email'); + expect(response.data.email).to.eq('john.doe@example.com'); + }) + + it('Should return misc. information about user if shared', async ()=> { + const response = await ai.get_profile_from_text('I just bought an EV and wanted to take it for a spin.'); + expect(response.status).to.be.true; + expect(response.data).to.have.property('misc'); + }) + + it('Should return nothing if no profile information available', async ()=> { + const response = await ai.get_profile_from_text('Yes please'); + expect(response.status).to.be.true; + expect(response.data).to.be.empty; + }) +})