diff --git a/.eslintrc.json b/.eslintrc.json
index cdaee6c..3b71545 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -7,7 +7,8 @@
     },
     "plugins": [
         "@typescript-eslint",
-        "import"
+        "import",
+        "unused-imports"
     ],
     "rules": {
         "@typescript-eslint/consistent-type-imports": [
@@ -46,7 +47,8 @@
             }
         ],
         "no-throw-literal": "error",
-        "semi": "off"
+        "semi": "off",
+        "unused-imports/no-unused-imports": "error"
     },
     "ignorePatterns": [
         "out",
diff --git a/README.md b/README.md
index a207dda..f15a5be 100644
--- a/README.md
+++ b/README.md
@@ -87,6 +87,7 @@ Options:
   -i, --input <file>     input yaml file path to generate slide (default: "slides.yaml")
   -o, --output <file>    output path to write markdown file
   -l, --locale <locale>  locale of generated slide
+  -s, --service <service>  service to use ("openai" or "azure-ai-inference") (default: "openai")
   -u, --apiurl <url>     base url of openai api (e.g.: https://api.openai.com/v1)
   -k, --apikey           api key of openai (or openai-compatible) api
   -m, --model <model>    model of openai api
diff --git a/package.json b/package.json
index 4b8c502..6743901 100644
--- a/package.json
+++ b/package.json
@@ -73,37 +73,53 @@
     "configuration": {
       "title": "Slidaiv",
       "properties": {
-        "slidaiv.apiKey": {
-          "type": "null",
-          "default": null,
-          "markdownDescription": "Set API Key to authorize requests to OpenAI API from [here](command:slidaiv.command.setApiKey)."
+        "slidaiv.llmService": {
+          "type": "string",
+          "order": 1,
+          "default": "openai",
+          "enum": [
+            "openai",
+            "azure-ai-inference"
+          ],
+          "description": "Select LLM service to generate Slidev contents. (default: openai)"
         },
         "slidaiv.baseUrl": {
           "type": "string",
+          "order": 2,
           "default": "https://api.openai.com/v1",
-          "description": "Specify OpenAI API Base URL (default: https://api.openai.com/v1). Enter this if you use OpenAI Compatible API."
+          "markdownDescription": "Specify LLM service's base URL (default: `https://api.openai.com/v1`). If you select `azure-ai-inference` in `#slidaiv.llmService#`, you cannot configure this setting, and `https://models.inference.ai.azure.com` will be used."
+        },
+        "slidaiv.apiKey": {
+          "type": "null",
+          "order": 3,
+          "default": null,
+          "markdownDescription": "API Key must be set from [here](command:slidaiv.command.setApiKey). API Key will be used to authorize requests to selected LLM Service (`#slidaiv.llmService#`)."
         },
         "slidaiv.model": {
           "type": "string",
-          "default": "gpt-3.5-turbo",
-          "description": "Specify the model to use. (default: gpt-3.5-turbo)"
+          "order": 4,
+          "default": "gpt-4o",
+          "description": "Enter the LLM model name. (default: gpt-4o)"
         },
         "slidaiv.prompt.generate": {
           "type": "string",
+          "order": 10,
           "editPresentation": "multilineText",
           "default": "",
           "markdownDescription": "System Prompt for `Generate Slidev contents` command.  \nYou can use variable `${locale}` in your prompt, which will be replaced with locale setting in runtime.\n\nIf empty, [the default prompt](https://github.com/kaakaa/slidaiv/blob/master/src/client/prompts.ts) will be used. (Default is empty)"
         },
         "slidaiv.prompt.decorate": {
           "type": "string",
+          "order": 11,
           "editPresentation": "multilineText",
           "default": "",
           "markdownDescription": "System Prompt for `Decorate contents (Experimental)` command.\n\nIf empty, [the default prompt](https://github.com/kaakaa/slidaiv/blob/master/src/client/prompts.ts) will be used. (Default is empty)"
         },
         "slidaiv.debug": {
           "type": "boolean",
+          "order": 90,
           "default": false,
-          "description": "Enable to output debug logs."
+          "description": "Enable debug log"
         }
       }
     }
@@ -142,12 +158,16 @@
     "esbuild": "^0.23.0",
     "eslint": "^8.57.0",
     "eslint-plugin-import": "^2.29.1",
+    "eslint-plugin-unused-imports": "^4.1.3",
     "ts-loader": "^9.5.1",
     "typescript": "^5.4.5",
     "webpack": "^5.91.0",
     "webpack-cli": "^5.1.4"
   },
   "dependencies": {
+    "@azure-rest/ai-inference": "1.0.0-beta.2",
+    "@azure/core-auth": "^1.7.2",
+    "@azure/core-sse": "^2.1.3",
     "@slidev/parser": "^0.49.27",
     "@slidev/types": "^0.49.27",
     "cli-progress": "^3.12.0",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 1f3d5fd..547165d 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -8,6 +8,15 @@ importers:
 
   .:
     dependencies:
+      '@azure-rest/ai-inference':
+        specifier: 1.0.0-beta.2
+        version: 1.0.0-beta.2
+      '@azure/core-auth':
+        specifier: ^1.7.2
+        version: 1.7.2
+      '@azure/core-sse':
+        specifier: ^2.1.3
+        version: 2.1.3
       '@slidev/parser':
         specifier: ^0.49.27
         version: 0.49.27(@vue/compiler-sfc@3.4.38)(postcss@8.4.41)(rollup@4.21.0)(typescript@5.4.5)(vite@5.4.2(@types/node@18.19.33)(terser@5.31.0))
@@ -84,6 +93,9 @@ importers:
       eslint-plugin-import:
         specifier: ^2.29.1
         version: 2.29.1(@typescript-eslint/parser@7.10.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)
+      eslint-plugin-unused-imports:
+        specifier: ^4.1.3
+        version: 4.1.3(@typescript-eslint/eslint-plugin@7.10.0(@typescript-eslint/parser@7.10.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)
       ts-loader:
         specifier: ^9.5.1
         version: 9.5.1(typescript@5.4.5)(webpack@5.91.0(esbuild@0.23.0)(webpack-cli@5.1.4))
@@ -112,6 +124,14 @@ packages:
   '@antfu/utils@0.7.10':
     resolution: {integrity: sha512-+562v9k4aI80m1+VuMHehNJWLOFjBnXn3tdOitzD0il5b7smkSBal4+a3oKiQTbrwMmN/TBUMDvbdoWDehgOww==}
 
+  '@azure-rest/ai-inference@1.0.0-beta.2':
+    resolution: {integrity: sha512-xPnXJmldQ7vufTT1GtoHPk+korWFnl72whoDB34tb9DALTGGBKIAWobDQBMGYdV2ce0/KdhRFPwnfeZjNyfVsw==}
+    engines: {node: '>=18.0.0'}
+
+  '@azure-rest/core-client@2.2.0':
+    resolution: {integrity: sha512-2uPSZPRb2TRyYONl5IcsWhX7C1xbm6Gof/tcLlDnYg7fCVcEfASfySKZ9OTHxhNeFgo79LhrT6juEoxTHvrXkQ==}
+    engines: {node: '>=18.0.0'}
+
   '@azure/abort-controller@1.1.0':
     resolution: {integrity: sha512-TrRLIoSQVzfAJX9H1JeFjzAoDGcoK1IYX1UImfceTZpsyYfWr09Ss1aHW1y5TrrR3iq6RZLBwJ3E24uwPhwahw==}
     engines: {node: '>=12.0.0'}
@@ -128,10 +148,18 @@ packages:
     resolution: {integrity: sha512-kRdry/rav3fUKHl/aDLd/pDLcB+4pOFwPPTVEExuMyaI5r+JBbMWqRbCY1pn5BniDaU3lRxO9eaQ1AmSMehl/w==}
     engines: {node: '>=18.0.0'}
 
+  '@azure/core-lro@2.7.2':
+    resolution: {integrity: sha512-0YIpccoX8m/k00O7mDDMdJpbr6mf1yWo2dfmxt5A8XVZVVMz2SSKaEbMCeJRvgQ0IaSlqhjT47p4hVIRRy90xw==}
+    engines: {node: '>=18.0.0'}
+
   '@azure/core-rest-pipeline@1.16.0':
     resolution: {integrity: sha512-CeuTvsXxCUmEuxH5g/aceuSl6w2EugvNHKAtKKVdiX915EjJJxAwfzNNWZreNnbxHZ2fi0zaM6wwS23x2JVqSQ==}
     engines: {node: '>=18.0.0'}
 
+  '@azure/core-sse@2.1.3':
+    resolution: {integrity: sha512-KSSdIKy8kvWCpYr8Hzpu22j3wcXsVTYE0IlgmI1T/aHvBDsLgV91y90UTfVWnuiuApRLCCVC4gS09ApBGOmYQA==}
+    engines: {node: '>=18.0.0'}
+
   '@azure/core-tracing@1.1.2':
     resolution: {integrity: sha512-dawW9ifvWAWmUm9/h+/UQ2jrdvjCJ7VJEuCJ6XVNudzcOwm53BFZH4Q845vjfgoUAM8ZxokvVNxNxAITc502YA==}
     engines: {node: '>=18.0.0'}
@@ -2692,6 +2720,15 @@ packages:
       '@typescript-eslint/parser':
         optional: true
 
+  eslint-plugin-unused-imports@4.1.3:
+    resolution: {integrity: sha512-lqrNZIZjFMUr7P06eoKtQLwyVRibvG7N+LtfKtObYGizAAGrcqLkc3tDx+iAik2z7q0j/XI3ihjupIqxhFabFA==}
+    peerDependencies:
+      '@typescript-eslint/eslint-plugin': ^8.0.0-0 || ^7.0.0 || ^6.0.0 || ^5.0.0
+      eslint: ^9.0.0 || ^8.0.0
+    peerDependenciesMeta:
+      '@typescript-eslint/eslint-plugin':
+        optional: true
+
   eslint-scope@5.1.1:
     resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==}
     engines: {node: '>=8.0.0'}
@@ -4902,6 +4939,29 @@ snapshots:
 
   '@antfu/utils@0.7.10': {}
 
+  '@azure-rest/ai-inference@1.0.0-beta.2':
+    dependencies:
+      '@azure-rest/core-client': 2.2.0
+      '@azure/abort-controller': 1.1.0
+      '@azure/core-auth': 1.7.2
+      '@azure/core-lro': 2.7.2
+      '@azure/core-rest-pipeline': 1.16.0
+      '@azure/logger': 1.1.2
+      tslib: 2.6.2
+    transitivePeerDependencies:
+      - supports-color
+
+  '@azure-rest/core-client@2.2.0':
+    dependencies:
+      '@azure/abort-controller': 2.1.2
+      '@azure/core-auth': 1.7.2
+      '@azure/core-rest-pipeline': 1.16.0
+      '@azure/core-tracing': 1.1.2
+      '@azure/core-util': 1.9.0
+      tslib: 2.6.2
+    transitivePeerDependencies:
+      - supports-color
+
   '@azure/abort-controller@1.1.0':
     dependencies:
       tslib: 2.6.2
@@ -4928,6 +4988,13 @@ snapshots:
     transitivePeerDependencies:
       - supports-color
 
+  '@azure/core-lro@2.7.2':
+    dependencies:
+      '@azure/abort-controller': 2.1.2
+      '@azure/core-util': 1.9.0
+      '@azure/logger': 1.1.2
+      tslib: 2.6.2
+
   '@azure/core-rest-pipeline@1.16.0':
     dependencies:
       '@azure/abort-controller': 2.1.2
@@ -4941,6 +5008,10 @@ snapshots:
     transitivePeerDependencies:
       - supports-color
 
+  '@azure/core-sse@2.1.3':
+    dependencies:
+      tslib: 2.6.2
+
   '@azure/core-tracing@1.1.2':
     dependencies:
       tslib: 2.6.2
@@ -8166,6 +8237,12 @@ snapshots:
       - eslint-import-resolver-webpack
       - supports-color
 
+  eslint-plugin-unused-imports@4.1.3(@typescript-eslint/eslint-plugin@7.10.0(@typescript-eslint/parser@7.10.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0):
+    dependencies:
+      eslint: 8.57.0
+    optionalDependencies:
+      '@typescript-eslint/eslint-plugin': 7.10.0(@typescript-eslint/parser@7.10.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)
+
   eslint-scope@5.1.1:
     dependencies:
       esrecurse: 4.3.0
diff --git a/src/cli/main.ts b/src/cli/main.ts
index 15a0127..74ae86e 100644
--- a/src/cli/main.ts
+++ b/src/cli/main.ts
@@ -6,10 +6,8 @@ import { MultiBar, Presets } from 'cli-progress';
 import { parse } from "@slidev/parser";
 
 import { Logger } from '@/logger';
-import { loadConfig as loadSettings, SlidevHeader } from '@/cli/util';
-import type { GeneratedSlide } from '@/cli/util';
-import { Client } from '@/client/openai';
-import type { CustomCancellationToken } from '@/client/llmClient';
+import { loadConfig as loadSettings, SlidevHeader, type GeneratedSlide } from '@/cli/util';
+import { LLMClientFactory, type CustomCancellationToken } from '@/client/llmClient';
 import { SlidevPage } from '@/model/slidev';
 
 Logger.init((message: string) => { console.log(message); });
@@ -20,8 +18,9 @@ program
   .option('-i, --input <file>', 'input yaml file path to generate slide', 'slides.yaml')
   .option('-o, --output <file>', 'output path to write markdown file')
   .option('-l, --locale <locale>', 'locale of generated slide')
+  .option('-s, --service <service>', 'service to use ("openai" or "azure-ai-inference")', 'openai')
   .option('-u, --apiurl <url>', 'base url of openai api (e.g.: https://api.openai.com/v1)')
-  .option('-k, --apikey', 'api key of openai (or openai-compatible) api ')
+  .option('-k, --apikey <apikey>', 'api key of openai (or openai-compatible) api')
   .option('-m, --model <model>', 'model of openai api')
   .option('-d, --debug', 'output extra debugging', false);
 const options = program.parse().opts();
@@ -37,7 +36,8 @@ class CancelHandler implements CustomCancellationToken {
 // Set up
 const multi = new MultiBar({}, Presets.shades_classic);
 const progress = multi.create(settings.slides?.length, 0);
-const client = new Client(settings.context, settings.context.locale);
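+// The factory returns an OpenAIClient or AzureAIClient based on settings.context.service.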
+const client = LLMClientFactory.create(settings.context, settings.context.locale);
 
 multi.log("Generating slides...\n");
 
diff --git a/src/cli/util.ts b/src/cli/util.ts
index 50ca926..ab871f5 100644
--- a/src/cli/util.ts
+++ b/src/cli/util.ts
@@ -43,13 +43,15 @@ title: AI-generated slides
 `;
 
 export function loadConfig(f: string, options: OptionValues): CLISettings {
-    const { input, output, locale, apiurl, apikey, model, debug } = options;
+    const { input, output, locale, service, apiurl, apikey, model, debug } = options;
     const settings = yaml.parse(f) as CLISettings;
 
     const loc = locale ?? settings.context.locale ?? "en";
 
     return {
         context: {
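+            // Precedence for each setting: CLI option, then yaml settings, then built-in default.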
+            service: service ?? settings.context.service ?? "openai",
             apiKey: apikey ?? settings.context.apiKey ?? "dummy",
             baseUrl: apiurl ?? settings.context.baseUrl ?? "https://openai.com/v1",
             model: model ?? settings.context.model ?? "gpt-4o",
diff --git a/src/client/azure.ts b/src/client/azure.ts
new file mode 100644
index 0000000..cbac31d
--- /dev/null
+++ b/src/client/azure.ts
@@ -0,0 +1,128 @@
+import createClient from "@azure-rest/ai-inference";
+import type { ChatCompletionsOutput, ModelClient } from "@azure-rest/ai-inference";
+import { AzureKeyCredential } from "@azure/core-auth";
+
+import { Logger } from "@/logger";
+import { getLocaleName } from "@/utils";
+import type { Configuration } from "@/model/config";
+import type { CustomCancellationToken, LLMClient } from "@/client/llmClient";
+import { evalPromptLiteral, getDefaultPromptDecorateContents, getDefaultPromptForGenerateContents } from "@/client/prompts";
+
+export class AzureAIClient implements LLMClient {
+    private client: ModelClient;
+    private model: string;
+    private promptGenerate: string;
+    private promptDecorate: string;
+    private defaultLocale: string;
+
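+    // Note: the endpoint is fixed to Azure's model inference endpoint, so
+    // config.baseUrl is not used here (see the slidaiv.baseUrl setting description).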
+    constructor(config: Configuration, locale: string) {
+        this.client = createClient("https://models.inference.ai.azure.com", new AzureKeyCredential(config.apiKey));
+        this.model = config.model;
+        this.defaultLocale = locale;
+        this.promptGenerate = config.promptGenerate;
+        this.promptDecorate = config.promptDecorate;
+    }
+
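+    // Generates Slidev page contents via the chat completions API. A cancellation
+    // request from the caller is bridged to the HTTP call through an AbortController.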
+    async generatePageContents(token: CustomCancellationToken, prompt: string, m: string | null, locale: string | null): Promise<string | null> {
+        const ac = new AbortController();
+        token.onCancellationRequested(() => {
+            Logger.info("User requested to cancel the task.");
+            ac.abort();
+        });
+
+        const loc = getLocaleName(locale || this.defaultLocale);
+        const model = m ?? this.model;
+        let sysPrompt;
+        if (this.promptGenerate && this.promptGenerate.length > 0) {
+            sysPrompt = evalPromptLiteral(this.promptGenerate, { locale: loc });
+        } else {
+            Logger.info("Default prompt is used, because custom prompt is not set.");
+            sysPrompt = getDefaultPromptForGenerateContents(loc);
+        }
+        Logger.info(`Calling Azure AI Inference API: model=${model}, locale=${locale}`);
+        Logger.debug(`sysPrompt=${sysPrompt}`);
+
+        const response = await this.client.path("/chat/completions").post({
+            body: {
+                messages: [
+                    { role: "system", content: sysPrompt },
+                    { role: "user", content: prompt }
+                ],
+                model: model,
+                temperature: 1,
+                max_tokens: 4096,
+                top_p: 1
+            },
+            abortSignal: ac.signal,
+        });
+
+        Logger.info(`Request completed with status ${response?.status}`);
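+        // @azure-rest clients report the HTTP status as a string; non-"200" responses carry an error body.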
+        if (response.status !== "200") {
+            if ("error" in response.body) {
+                throw response.body.error;
+            } else {
+                throw new Error(`Request failed with status ${response.status}; unknown error occurred.`);
+            }
+        }
+        const output = response.body as ChatCompletionsOutput;
+        Logger.debug(`  Model: ${output?.model}`);
+        Logger.debug(`  Usage: ${JSON.stringify(output?.usage)}`);
+        if (output?.choices?.length > 0) {
+            return output.choices[0].message.content;
+        }
+        throw new Error("Response from Azure model is empty or unexpected.");
+    }
+
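+    // Rewrites the given contents using the decoration prompt; mirrors the
+    // request/response handling of generatePageContents.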
+    async decorateContents(token: CustomCancellationToken, prompt: string): Promise<string | null> {
+        const ac = new AbortController();
+        token.onCancellationRequested(() => {
+            ac.abort();
+        });
+
+        let sysPrompt;
+        if (this.promptDecorate && this.promptDecorate.length > 0) {
+            sysPrompt = evalPromptLiteral(this.promptDecorate, { prompt });
+        } else {
+            Logger.info("Default prompt is used, because custom prompt is not set.");
+            sysPrompt = getDefaultPromptDecorateContents();
+        }
+        Logger.info(`Calling Azure AI Inference API: model=${this.model}`);
+        Logger.debug(`sysPrompt=${sysPrompt}`);
+
+        const response = await this.client.path("/chat/completions").post({
+            body: {
+                messages: [
+                    { role: "system", content: sysPrompt },
+                    { role: "user", content: prompt }
+                ],
+                model: this.model,
+                temperature: 1,
+                max_tokens: 4096,
+                top_p: 1
+            },
+            abortSignal: ac.signal,
+        });
+
+        Logger.info(`Request completed with status ${response?.status}`);
+        if (response.status !== "200") {
+            if ("error" in response.body) {
+                throw response.body.error;
+            } else {
+                throw new Error(`Request failed with status ${response.status}; unknown error occurred.`);
+            }
+        }
+        const output = response.body as ChatCompletionsOutput;
+        Logger.debug(`  Model: ${output?.model}`);
+        Logger.debug(`  Usage: ${JSON.stringify(output?.usage)}`);
+        if (output?.choices?.length > 0) {
+            return output.choices[0].message.content;
+        }
+        throw new Error("Response from Azure model is empty or unexpected.");
+    }
+}
diff --git a/src/client/llmClient.ts b/src/client/llmClient.ts
index e3aa695..da34a46 100644
--- a/src/client/llmClient.ts
+++ b/src/client/llmClient.ts
@@ -1,3 +1,7 @@
+import type { Configuration } from "@/model/config";
+import { OpenAIClient } from "@/client/openai";
+import { AzureAIClient } from "@/client/azure";
+
 export interface LLMClient {
     generatePageContents(token: CustomCancellationToken, prompt: string, model: string | null, locale: string | null): Promise<string | null>;
     decorateContents(token: CustomCancellationToken, prompt: string): Promise<string | null>;
@@ -26,3 +30,18 @@ export class UnconfiguredClient implements LLMClient {
     }
 
 }
+
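+// Maps the configured service name to a concrete LLMClient implementation;
+// unrecognized values fall back to UnconfiguredClient.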
+export class LLMClientFactory {
+    static create(config: Configuration, locale: string): LLMClient {
+        switch (config.service) {
+            case 'openai':
+                return new OpenAIClient(config, locale);
+            case 'azure-ai-inference':
+                return new AzureAIClient(config, locale);
+            default:
+                return UnconfiguredClient.instance;
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/client/openai.ts b/src/client/openai.ts
index f47b7dd..ac8755d 100644
--- a/src/client/openai.ts
+++ b/src/client/openai.ts
@@ -10,7 +10,7 @@ import { Logger } from '@/logger';
 import type { CustomCancellationToken, LLMClient } from '@/client/llmClient';
 import type { Configuration } from '@/model/config';
 
-export class Client implements LLMClient {
+export class OpenAIClient implements LLMClient {
     private client: OpenAI;
     private llmModel: string;
     private promptGenerate: string;
diff --git a/src/constants.ts b/src/constants.ts
index f780c93..2925152 100644
--- a/src/constants.ts
+++ b/src/constants.ts
@@ -8,6 +8,7 @@ export const CommandIdDecorateContents = 'slidaiv.command.decorateContents';
 
 export const PreferenceIdApiKey = 'slidaiv.apikey';
 
+export const ConfigKeyLLMService = 'llmService';
 export const ConfigKeyApiBaseURL = 'baseUrl';
 export const ConfigKeyLLMModel = 'model';
 export const ConfigKeyPromptGenerate = 'prompt.generate';
diff --git a/src/extension.ts b/src/extension.ts
index 47f6951..f77fa48 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -2,18 +2,16 @@ import * as vscode from 'vscode';
 
 import { CommandIdDecorateContents, CommandIdGenerateContents, CommandIdOpenSettingsApiKey, CommandIdSetApiKey, ExtensionID, ExtensionName, MessageSelectionSetApiKey, MessageSetApiKey, PreferenceIdApiKey } from '@/constants';
 import { SecretTokenStore as SecretApiKeyStore } from '@/secret';
-import { Client } from '@/client/openai';
 import { Logger } from '@/logger';
 import { doTaskWithProgress, getTaskDecorateContent, getTaskGenerateContents } from '@/tasks';
 import { readConfiguration } from '@/model/config';
-import { UnconfiguredClient } from '@/client/llmClient';
+import { LLMClientFactory, UnconfiguredClient } from '@/client/llmClient';
 import type { LLMClient } from '@/client/llmClient';
 
-async function initialize() {
+async function initialize(): Promise<LLMClient> {
 	const config = await readConfiguration();
 	Logger.isDebug = config.isDebug;
-	const client = new Client(config, vscode.env.language);
-	return client;
+	return LLMClientFactory.create(config, vscode.env.language);
 }
 
 async function setApiKey() {
diff --git a/src/logger.ts b/src/logger.ts
index 2c93983..2206da0 100644
--- a/src/logger.ts
+++ b/src/logger.ts
@@ -1,5 +1,3 @@
-import type * as vscode from 'vscode';
-
 export interface LogOutput {
     appendLine: (message: string) => void;
 }
diff --git a/src/model/config.ts b/src/model/config.ts
index c8546dc..3b45e4e 100644
--- a/src/model/config.ts
+++ b/src/model/config.ts
@@ -3,7 +3,10 @@
 import * as Constants from '@/constants';
 import { SecretTokenStore } from '@/secret';
 
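+// Must stay in sync with the "slidaiv.llmService" enum in package.json.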
+type LLMService = "openai" | "azure-ai-inference";
 export type Configuration = {
+    service: LLMService;
     apiKey: string;
     baseUrl: string;
     model: string;
@@ -17,6 +19,7 @@ export async function readConfiguration(): Promise<Configuration> {
     const apiKey = await SecretTokenStore.instance.get();
     const ws = vscode.workspace;
     return {
+        service: ws.getConfiguration(Constants.ExtensionID).get(Constants.ConfigKeyLLMService) ?? 'openai',
         apiKey: apiKey ?? '',
         baseUrl: ws.getConfiguration(Constants.ExtensionID).get(Constants.ConfigKeyApiBaseURL) ?? '',
         model: ws.getConfiguration(Constants.ExtensionID).get(Constants.ConfigKeyLLMModel) ?? '',
diff --git a/webpack.config.js b/webpack.config.js
index eecc612..22c218f 100644
--- a/webpack.config.js
+++ b/webpack.config.js
@@ -22,7 +22,8 @@ const extensionConfig = {
   },
   devtool: 'source-map',
   externals: {
-    vscode: 'commonjs vscode' // the vscode-module is created on-the-fly and must be excluded. Add other modules that cannot be webpack'ed, 📖 -> https://webpack.js.org/configuration/externals/
+    vscode: 'commonjs vscode', // the vscode-module is created on-the-fly and must be excluded. Add other modules that cannot be webpack'ed, 📖 -> https://webpack.js.org/configuration/externals/
+    '@azure/core-rest-pipeline': 'commonjs @azure/core-rest-pipeline', // Avoid 'proxyPolicy is not supported in browser environment' error
     // modules added here also need to be added in the .vscodeignore file
   },
   resolve: {