Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

initial commit of the OpenAI Agent POC #629

Open
wants to merge 41 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
41 commits
Select commit Hold shift + click to select a range
a42dca2
initial commit of the OpenAI Agent POC
birdperson1970 Nov 16, 2023
8a57daf
fixed functions
birdperson1970 Nov 16, 2023
0c8fb08
bad assit id
birdperson1970 Nov 17, 2023
cea8455
about to add step
birdperson1970 Nov 20, 2023
66a6b95
Implemented the OpenAI Run Function Step
birdperson1970 Nov 20, 2023
a38f767
fixed imports
birdperson1970 Nov 21, 2023
3874060
implemented slash command and functions now work
birdperson1970 Nov 21, 2023
ad60fe0
Linked UI session to OpenAI thread
birdperson1970 Nov 23, 2023
cb50c4e
bug fixes and launch config
birdperson1970 Nov 24, 2023
16e492d
Initial Docs
birdperson1970 Nov 24, 2023
bc7c66b
Added GPT3.5 and fixed models
birdperson1970 Nov 25, 2023
2bd91fd
fixed bad open api model names
birdperson1970 Nov 25, 2023
2be212d
added logging
birdperson1970 Nov 25, 2023
d13acc8
Jazzed up the logging
birdperson1970 Nov 25, 2023
5ae2a78
poc
birdperson1970 Nov 25, 2023
5d6fdef
unit test for fuzzy search
birdperson1970 Nov 25, 2023
33a91d6
merge
birdperson1970 Nov 26, 2023
ba2b4f1
Merge remote-tracking branch 'upstream/main' into openai_agent_poc
birdperson1970 Nov 26, 2023
d08bfe0
merged files
birdperson1970 Nov 26, 2023
62422f0
Som broken But I need github perm links
birdperson1970 Dec 1, 2023
478a6c9
gen files update
birdperson1970 Dec 2, 2023
6ea256a
clean compile
birdperson1970 Dec 2, 2023
5af454d
can't find llm
birdperson1970 Dec 2, 2023
914137b
Merge remote-tracking branch 'origin/main' into pydantic_2_upgrade
birdperson1970 Dec 2, 2023
2d137bb
Wroking
birdperson1970 Dec 3, 2023
7b8528e
final tweaks for review
birdperson1970 Dec 3, 2023
15aa9f7
fixed python-lsp-server[websockets]
birdperson1970 Dec 3, 2023
4795e0d
fixed assistent_id
birdperson1970 Dec 3, 2023
e2fcf01
Merge remote-tracking branch 'origin/pydantic_2_upgrade' into openai_…
birdperson1970 Dec 3, 2023
2f3bad0
pulling back the version for WSL compatability
birdperson1970 Dec 3, 2023
cffbf2a
module version problem reverting
birdperson1970 Dec 3, 2023
9a1002d
migrated schema generation to Pydantic V2
birdperson1970 Dec 6, 2023
82c0333
fixed chroma db version issue
birdperson1970 Dec 6, 2023
30c176f
Merge remote-tracking branch 'origin/pydantic_2_upgrade' into openai_…
birdperson1970 Dec 6, 2023
41e9fea
broken waiting for pydantic 2 fix
birdperson1970 Dec 7, 2023
ae3610f
fixed aweful Union serialization bug
birdperson1970 Dec 9, 2023
f19a40a
pydantic v2 fixed union issue
birdperson1970 Dec 9, 2023
2cd7c05
Merge remote-tracking branch 'origin/pydantic_2_upgrade' into openai_…
birdperson1970 Dec 9, 2023
69ef211
fixed templates to validation
birdperson1970 Dec 9, 2023
7fda98f
Merge remote-tracking branch 'origin/pydantic_2_upgrade' into openai_…
birdperson1970 Dec 9, 2023
9cf002d
Working
birdperson1970 Dec 9, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 2 additions & 2 deletions extensions/vscode/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion extensions/vscode/package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "continue",
"icon": "media/icon.png",
"version": "0.6.4",
"version": "0.6.7",
"repository": {
"type": "git",
"url": "https://github.com/continuedev/continue"
Expand Down
5 changes: 5 additions & 0 deletions extensions/vscode/schema/ContinueConfig.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,10 @@ export type SystemMessage1 = string;
* The maximum context length of the LLM in tokens, as counted by count_tokens.
*/
export type ContextLength = number;
/**
* The maximum number of tokens to generate.
*/
export type MaxTokens = number;
/**
* Tokens that will stop the completion.
*/
Expand Down Expand Up @@ -291,6 +295,7 @@ export interface LLM {
model: Model;
system_message?: SystemMessage1;
context_length?: ContextLength;
max_tokens?: MaxTokens;
stop_tokens?: StopTokens;
temperature?: Temperature;
top_p?: TopP;
Expand Down
5 changes: 5 additions & 0 deletions extensions/vscode/schema/LLM.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ export type SystemMessage = string;
* The maximum context length of the LLM in tokens, as counted by count_tokens.
*/
export type ContextLength = number;
/**
* The maximum number of tokens to generate.
*/
export type MaxTokens = number;
/**
* Tokens that will stop the completion.
*/
Expand Down Expand Up @@ -77,6 +81,7 @@ export interface LLM1 {
model: Model;
system_message?: SystemMessage;
context_length?: ContextLength;
max_tokens?: MaxTokens;
stop_tokens?: StopTokens;
temperature?: Temperature;
top_p?: TopP;
Expand Down
5 changes: 5 additions & 0 deletions extensions/vscode/schema/Models.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ export type SystemMessage = string;
* The maximum context length of the LLM in tokens, as counted by count_tokens.
*/
export type ContextLength = number;
/**
* The maximum number of tokens to generate.
*/
export type MaxTokens = number;
/**
* Tokens that will stop the completion.
*/
Expand Down Expand Up @@ -93,6 +97,7 @@ export interface LLM {
model: Model;
system_message?: SystemMessage;
context_length?: ContextLength;
max_tokens?: MaxTokens;
stop_tokens?: StopTokens;
temperature?: Temperature;
top_p?: TopP;
Expand Down
5 changes: 5 additions & 0 deletions gui/src/schema/ContinueConfig.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,10 @@ export type SystemMessage1 = string;
* The maximum context length of the LLM in tokens, as counted by count_tokens.
*/
export type ContextLength = number;
/**
* The maximum number of tokens to generate.
*/
export type MaxTokens = number;
/**
* Tokens that will stop the completion.
*/
Expand Down Expand Up @@ -291,6 +295,7 @@ export interface LLM {
model: Model;
system_message?: SystemMessage1;
context_length?: ContextLength;
max_tokens?: MaxTokens;
stop_tokens?: StopTokens;
temperature?: Temperature;
top_p?: TopP;
Expand Down
5 changes: 5 additions & 0 deletions gui/src/schema/LLM.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ export type SystemMessage = string;
* The maximum context length of the LLM in tokens, as counted by count_tokens.
*/
export type ContextLength = number;
/**
* The maximum number of tokens to generate.
*/
export type MaxTokens = number;
/**
* Tokens that will stop the completion.
*/
Expand Down Expand Up @@ -77,6 +81,7 @@ export interface LLM1 {
model: Model;
system_message?: SystemMessage;
context_length?: ContextLength;
max_tokens?: MaxTokens;
stop_tokens?: StopTokens;
temperature?: Temperature;
top_p?: TopP;
Expand Down
5 changes: 5 additions & 0 deletions gui/src/schema/Models.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ export type SystemMessage = string;
* The maximum context length of the LLM in tokens, as counted by count_tokens.
*/
export type ContextLength = number;
/**
* The maximum number of tokens to generate.
*/
export type MaxTokens = number;
/**
* Tokens that will stop the completion.
*/
Expand Down Expand Up @@ -93,6 +97,7 @@ export interface LLM {
model: Model;
system_message?: SystemMessage;
context_length?: ContextLength;
max_tokens?: MaxTokens;
stop_tokens?: StopTokens;
temperature?: Temperature;
top_p?: TopP;
Expand Down
35 changes: 35 additions & 0 deletions gui/src/util/modelData.ts
Original file line number Diff line number Diff line change
Expand Up @@ -396,6 +396,18 @@ const gpt35turbo: ModelPackage = {
},
};

// Model package for the OpenAI Assistant-based agent provider; surfaced in
// the model-selection UI under the "openai_agent" provider.
const gpt4_agent: ModelPackage = {
  title: "GPT4_Agent",
  description:
    "GPT4 Agent",
  params: {
    model: "gpt4_agent",
    // GPT-4's context window is 8192 tokens — matches the gpt-4 defaults in
    // schema/json/ContinueConfig.json. (Was mistyped as 8096.)
    context_length: 8192,
    title: "GPT4_Agent",
    // Populated from the "API Key" input collected when the user adds the model.
    api_key: "",
  },
};

const OLLAMA_TO_REPLICATE_MODEL_NAMES: { [key: string]: string } = {
"codellama:7b-instruct":
"meta/codellama-7b-instruct:6527b83e01e41412db37de5110a8670e3701ee95872697481a355e05ce12af0e",
Expand Down Expand Up @@ -770,4 +782,27 @@ After it's up and running, you can start using Continue.`,
],
collectInputFor: [...completionParamsInputs],
},
openai_agent: {
title: "OpenAIAgent",
class: "OpenAIAgent",
description:
"OpenAIAgent description",
longDescription:
'OpenAIAgent longDescription',
icon: "openai.png",
tags: [ModelProviderTag.Free],
packages: [
{ ...gpt4_agent, title: "GPT-4 Agent" }
],
collectInputFor: [
{
inputType: CollectInputType.text,
key: "api_key",
label: "API Key",
placeholder: "Enter your OpenAI API key",
required: true,
},
...completionParamsInputs,
],
},
};
18 changes: 14 additions & 4 deletions schema/json/ContinueConfig.json
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,12 @@
"default": 2048,
"type": "integer"
},
"max_tokens": {
"title": "Max Tokens",
"description": "The maximum number of tokens to generate.",
"default": 600,
"type": "integer"
},
"stop_tokens": {
"title": "Stop Tokens",
"description": "Tokens that will stop the completion.",
Expand Down Expand Up @@ -521,7 +527,8 @@
"title": null,
"model": "gpt-4",
"system_message": null,
"context_length": 2048,
"context_length": 8192,
"max_tokens": 600,
"stop_tokens": null,
"temperature": null,
"top_p": null,
Expand All @@ -543,7 +550,8 @@
"title": null,
"model": "gpt-3.5-turbo",
"system_message": null,
"context_length": 2048,
"context_length": 4096,
"max_tokens": 600,
"stop_tokens": null,
"temperature": null,
"top_p": null,
Expand All @@ -565,7 +573,8 @@
"title": null,
"model": "gpt-4",
"system_message": null,
"context_length": 2048,
"context_length": 8192,
"max_tokens": 600,
"stop_tokens": null,
"temperature": null,
"top_p": null,
Expand All @@ -587,7 +596,8 @@
"title": null,
"model": "gpt-4",
"system_message": null,
"context_length": 2048,
"context_length": 8192,
"max_tokens": 600,
"stop_tokens": null,
"temperature": null,
"top_p": null,
Expand Down
6 changes: 6 additions & 0 deletions schema/json/LLM.json
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,12 @@
"default": 2048,
"type": "integer"
},
"max_tokens": {
"title": "Max Tokens",
"description": "The maximum number of tokens to generate.",
"default": 600,
"type": "integer"
},
"stop_tokens": {
"title": "Stop Tokens",
"description": "Tokens that will stop the completion.",
Expand Down
6 changes: 6 additions & 0 deletions schema/json/Models.json
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,12 @@
"default": 2048,
"type": "integer"
},
"max_tokens": {
"title": "Max Tokens",
"description": "The maximum number of tokens to generate.",
"default": 600,
"type": "integer"
},
"stop_tokens": {
"title": "Stop Tokens",
"description": "Tokens that will stop the completion.",
Expand Down
1 change: 1 addition & 0 deletions server/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from continuedev.core.models import Models
from continuedev.core.config import CustomCommand, SlashCommand, ContinueConfig
from continuedev.libs.llm import OpenAIFreeTrial
from continuedev.libs.llm import OpenAIAgent

from continuedev.plugins.context_providers import (
DiffContextProvider,
Expand Down
14 changes: 7 additions & 7 deletions server/continuedev/core/autopilot.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from typing import Dict, List, Optional

from aiohttp import ClientPayloadError
from openai import error as openai_errors
import openai
import inspect

from ..libs.util.strings import remove_quotes_and_escapes
Expand Down Expand Up @@ -48,22 +48,22 @@


def get_error_title(e: Exception) -> str:
if isinstance(e, openai_errors.APIError):
if isinstance(e, openai.APIError):
return "OpenAI is overloaded with requests. Please try again."
elif isinstance(e, openai_errors.RateLimitError):
elif isinstance(e, openai.RateLimitError):
return "This OpenAI API key has been rate limited. Please try again."
elif isinstance(e, openai_errors.Timeout):
elif isinstance(e, openai.Timeout):
return "OpenAI timed out. Please try again."
elif (
isinstance(e, openai_errors.InvalidRequestError)
isinstance(e, openai.InvalidRequestError)
and e.code == "context_length_exceeded"
):
return e._message
elif isinstance(e, ClientPayloadError):
return "The request failed. Please try again."
elif isinstance(e, openai_errors.APIConnectionError):
elif isinstance(e, openai.APIConnectionError):
return 'The request failed. Please check your internet connection and try again. If this issue persists, you can use our API key for free by going to VS Code settings and changing the value of continue.OPENAI_API_KEY to ""'
elif isinstance(e, openai_errors.InvalidRequestError):
elif isinstance(e, openai.InvalidRequestError):
return "Invalid request sent to OpenAI. Please try again."
elif "rate_limit_ip_middleware" in e.__str__():
return "You have reached your limit for free usage of our token. You can continue using Continue by entering your own OpenAI API key in VS Code settings."
Expand Down
4 changes: 3 additions & 1 deletion server/continuedev/core/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
from ..libs.llm.replicate import ReplicateLLM
from ..libs.llm.together import TogetherLLM
from ..libs.llm.text_gen_webui import TextGenWebUI

from ..libs.llm.openai_agent import OpenAIAgent

class ContinueSDK(BaseModel):
pass
Expand Down Expand Up @@ -44,6 +44,7 @@ class ContinueSDK(BaseModel):
HuggingFaceTGI,
GooglePaLMAPI,
TextGenWebUI,
OpenAIAgent
]
}

Expand All @@ -60,6 +61,7 @@ class ContinueSDK(BaseModel):
"HuggingFaceTGI": "hf_tgi",
"GooglePaLMAPI": "google_palm_api",
"TextGenWebUI": "text_gen_webui",
"OpenAIAgent": "openai_agent",
}


Expand Down
2 changes: 1 addition & 1 deletion server/continuedev/libs/index/indices/chroma_index.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from ....server.protocols.ide_protocol import AbstractIdeProtocolServer
from .base import CodebaseIndex
from dotenv import load_dotenv
from openai.error import RateLimitError
from openai import RateLimitError
from pydantic import BaseModel

from ..chunkers.chunk import Chunk
Expand Down
1 change: 1 addition & 0 deletions server/continuedev/libs/llm/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,4 @@
from .replicate import ReplicateLLM # noqa: F401
from .text_gen_webui import TextGenWebUI # noqa: F401
from .together import TogetherLLM # noqa: F401
from .openai_agent import OpenAIAgent # noqa: F401
2 changes: 1 addition & 1 deletion server/continuedev/libs/llm/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from ..util.logging import logger
from .prompts.chat import template_alpaca_messages
import openai
from openai.error import RateLimitError
from openai import RateLimitError
from pydantic import Field, validator

from ...core.main import ChatMessage
Expand Down