Browse Source

Refines OpenAI model selection

- Moves AI settings into a new AI section
 - Moves the new setting into the `gitlens.ai` namespace and changes it to an enum for easier selection
 - Updates max characters to be dependent on selected model
main
Eric Amodio 1 year ago
parent
commit
1e976d2372
5 changed files with 75 additions and 27 deletions
  1. +2
    -1
      CHANGELOG.md
  2. +1
    -0
      README.md
  3. +37
    -14
      package.json
  4. +27
    -11
      src/ai/openaiProvider.ts
  5. +8
    -1
      src/config.ts

+ 2
- 1
CHANGELOG.md View File

@ -8,7 +8,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/) and this p
### Added ### Added
- Adds a `gitlens.experimental.openAIModel` setting to specify the OpenAI model to use to generate commit messages when using the `GitLens: Generate Commit Message` command (defaults to `gpt-3.5-turbo`) — closes [#2636](https://github.com/gitkraken/vscode-gitlens/issues/2636) thanks to [PR #2637](https://github.com/gitkraken/vscode-gitlens/pull/2637) by Daniel Rodríguez ([@sadasant](https://github.com/sadasant))
- Adds ability to choose the OpenAI model used for GitLens' experimental AI features — closes [#2636](https://github.com/gitkraken/vscode-gitlens/issues/2636) thanks to [PR #2637](https://github.com/gitkraken/vscode-gitlens/pull/2637) by Daniel Rodríguez ([@sadasant](https://github.com/sadasant))
- Adds a `gitlens.ai.experimental.openai.model` setting to specify the OpenAI model (defaults to `gpt-3.5-turbo`)
## [13.6.0] - 2023-05-11 ## [13.6.0] - 2023-05-11

+ 1
- 0
README.md View File

@ -1194,6 +1194,7 @@ A big thanks to the people that have contributed to this project:
- David Rees ([@studgeek](https://github.com/studgeek)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=studgeek) - David Rees ([@studgeek](https://github.com/studgeek)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=studgeek)
- Rickard ([@rickardp](https://github.com/rickardp)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=rickardp) - Rickard ([@rickardp](https://github.com/rickardp)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=rickardp)
- Johannes Rieken ([@jrieken](https://github.com/jrieken)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=jrieken) - Johannes Rieken ([@jrieken](https://github.com/jrieken)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=jrieken)
- Daniel Rodríguez ([@sadasant](https://github.com/sadasant)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=sadasant)
- Guillaume Rozan ([@rozangu1](https://github.com/rozangu1)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=rozangu1) - Guillaume Rozan ([@rozangu1](https://github.com/rozangu1)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=rozangu1)
- ryenus ([@ryenus](https://github.com/ryenus)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=ryenus) - ryenus ([@ryenus](https://github.com/ryenus)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=ryenus)
- Felipe Santos ([@felipecrs](https://github.com/felipecrs)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=felipecrs) - Felipe Santos ([@felipecrs](https://github.com/felipecrs)) — [contributions](https://github.com/gitkraken/vscode-gitlens/commits?author=felipecrs)

+ 37
- 14
package.json View File

@ -2806,6 +2806,43 @@
} }
}, },
{ {
"id": "ai",
"title": "AI",
"order": 113,
"properties": {
"gitlens.experimental.generateCommitMessagePrompt": {
"type": "string",
"default": "Commit messages must have a short description that is less than 50 chars followed by a newline and a more detailed description.\n- Write concisely using an informal tone and avoid specific names from the code",
"markdownDescription": "Specifies the prompt to use to tell OpenAI how to structure or format the generated commit message",
"scope": "window",
"order": 1
},
"gitlens.ai.experimental.openai.model": {
"type": "string",
"default": "gpt-3.5-turbo",
"enum": [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314"
],
"enumDescriptions": [
"GPT 3.5 Turbo",
"GPT 3.5 Turbo (March 1, 2023)",
"GPT 4",
"GPT 4 (March 14, 2023)",
"GPT 4 32k",
"GPT 4 32k (March 14, 2023)"
],
"markdownDescription": "Specifies the OpenAI model to use for GitLens' experimental AI features",
"scope": "window",
"order": 100
}
}
},
{
"id": "date-times", "id": "date-times",
"title": "Date & Times", "title": "Date & Times",
"order": 120, "order": 120,
@ -3660,20 +3697,6 @@
"scope": "window", "scope": "window",
"order": 50 "order": 50
}, },
"gitlens.experimental.generateCommitMessagePrompt": {
"type": "string",
"default": "Commit messages must have a short description that is less than 50 chars followed by a newline and a more detailed description.\n- Write concisely using an informal tone and avoid specific names from the code",
"markdownDescription": "Specifies the prompt to use to tell OpenAI how to structure or format the generated commit message",
"scope": "window",
"order": 55
},
"gitlens.experimental.openAIModel": {
"type": "string",
"default": "gpt-3.5-turbo",
"markdownDescription": "Specifies the OpenAI model to use to generate commit messages when using the `GitLens: Generate Commit Message` command",
"scope": "window",
"order": 56
},
"gitlens.advanced.externalDiffTool": { "gitlens.advanced.externalDiffTool": {
"type": [ "type": [
"string", "string",

+ 27
- 11
src/ai/openaiProvider.ts View File

@ -7,23 +7,25 @@ import type { Storage } from '../system/storage';
import { supportedInVSCodeVersion } from '../system/utils'; import { supportedInVSCodeVersion } from '../system/utils';
import type { AIProvider } from './aiProviderService'; import type { AIProvider } from './aiProviderService';
const maxCodeCharacters = 12000;
export class OpenAIProvider implements AIProvider { export class OpenAIProvider implements AIProvider {
readonly id = 'openai'; readonly id = 'openai';
readonly name = 'OpenAI'; readonly name = 'OpenAI';
private model: OpenAIChatCompletionModels = 'gpt-3.5-turbo';
private get model(): OpenAIModels {
return configuration.get('ai.experimental.openai.model') || 'gpt-3.5-turbo';
}
constructor(private readonly container: Container) {} constructor(private readonly container: Container) {}
dispose() {} dispose() {}
async generateCommitMessage(diff: string, options?: { context?: string }): Promise<string | undefined> { async generateCommitMessage(diff: string, options?: { context?: string }): Promise<string | undefined> {
this.model = configuration.get('experimental.openAIModel') || 'gpt-3.5-turbo';
const openaiApiKey = await getApiKey(this.container.storage); const openaiApiKey = await getApiKey(this.container.storage);
if (openaiApiKey == null) return undefined; if (openaiApiKey == null) return undefined;
const model = this.model;
const maxCodeCharacters = getMaxCharacters(model);
const code = diff.substring(0, maxCodeCharacters); const code = diff.substring(0, maxCodeCharacters);
if (diff.length > maxCodeCharacters) { if (diff.length > maxCodeCharacters) {
void window.showWarningMessage( void window.showWarningMessage(
@ -37,7 +39,7 @@ export class OpenAIProvider implements AIProvider {
} }
const data: OpenAIChatCompletionRequest = { const data: OpenAIChatCompletionRequest = {
model: this.model,
model: model,
messages: [ messages: [
{ {
role: 'system', role: 'system',
@ -82,11 +84,12 @@ export class OpenAIProvider implements AIProvider {
} }
async explainChanges(message: string, diff: string): Promise<string | undefined> { async explainChanges(message: string, diff: string): Promise<string | undefined> {
this.model = configuration.get('experimental.openAIModel') || 'gpt-3.5-turbo';
const openaiApiKey = await getApiKey(this.container.storage); const openaiApiKey = await getApiKey(this.container.storage);
if (openaiApiKey == null) return undefined; if (openaiApiKey == null) return undefined;
const model = this.model;
const maxCodeCharacters = getMaxCharacters(model);
const code = diff.substring(0, maxCodeCharacters); const code = diff.substring(0, maxCodeCharacters);
if (diff.length > maxCodeCharacters) { if (diff.length > maxCodeCharacters) {
void window.showWarningMessage( void window.showWarningMessage(
@ -95,7 +98,7 @@ export class OpenAIProvider implements AIProvider {
} }
const data: OpenAIChatCompletionRequest = { const data: OpenAIChatCompletionRequest = {
model: this.model,
model: model,
messages: [ messages: [
{ {
role: 'system', role: 'system',
@ -200,10 +203,23 @@ async function getApiKey(storage: Storage): Promise<string | undefined> {
return openaiApiKey; return openaiApiKey;
} }
export type OpenAIChatCompletionModels = 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-32k' | 'gpt-4-32k-0314';
function getMaxCharacters(model: OpenAIModels): number {
if (model === 'gpt-4-32k' || model === 'gpt-4-32k-0314') {
return 43000;
}
return 12000;
}
export type OpenAIModels =
| 'gpt-3.5-turbo'
| 'gpt-3.5-turbo-0301'
| 'gpt-4'
| 'gpt-4-0314'
| 'gpt-4-32k'
| 'gpt-4-32k-0314';
interface OpenAIChatCompletionRequest { interface OpenAIChatCompletionRequest {
model: OpenAIChatCompletionModels;
model: OpenAIModels;
messages: { role: 'system' | 'user' | 'assistant'; content: string }[]; messages: { role: 'system' | 'user' | 'assistant'; content: string }[];
temperature?: number; temperature?: number;
top_p?: number; top_p?: number;

+ 8
- 1
src/config.ts View File

@ -1,7 +1,15 @@
import type { OpenAIModels } from './ai/openaiProvider';
import type { DateTimeFormat } from './system/date'; import type { DateTimeFormat } from './system/date';
import { LogLevel } from './system/logger.constants'; import { LogLevel } from './system/logger.constants';
export interface Config { export interface Config {
ai: {
experimental: {
openai: {
model?: OpenAIModels;
};
};
};
autolinks: AutolinkReference[] | null; autolinks: AutolinkReference[] | null;
blame: { blame: {
avatars: boolean; avatars: boolean;
@ -49,7 +57,6 @@ export interface Config {
detectNestedRepositories: boolean; detectNestedRepositories: boolean;
experimental: { experimental: {
generateCommitMessagePrompt: string; generateCommitMessagePrompt: string;
openAIModel?: 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-32k' | 'gpt-4-32k-0314';
}; };
fileAnnotations: { fileAnnotations: {
command: string | null; command: string | null;

Loading…
Cancel
Save