Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

added type hints #1171

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension
Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
4 changes: 2 additions & 2 deletions interpreter/core/computer/os/os.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def get_selected_text(self):
self.computer.clipboard.copy(current_clipboard)
return selected_text

def notify(self, text):
def notify(self, text: str):
"""
Displays a notification on the computer.
"""
Expand Down Expand Up @@ -71,7 +71,7 @@ def notify(self, text):
import plyer

plyer.notification.notify(title=title, message=text)
except:
except Exception:
# Optional package
pass
except Exception as e:
Expand Down
84 changes: 48 additions & 36 deletions interpreter/core/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import threading
import time
from datetime import datetime
from typing import Any

from ..terminal_interface.terminal_interface import terminal_interface
from ..terminal_interface.utils.display_markdown_message import display_markdown_message
Expand Down Expand Up @@ -41,41 +42,42 @@ class OpenInterpreter:

def __init__(
self,
messages=None,
offline=False,
auto_run=False,
verbose=False,
debug=False,
max_output=2800,
safe_mode="off",
shrink_images=False,
force_task_completion=False,
force_task_completion_message="""Proceed. You CAN run code on my machine. If you want to run code, start your message with "```"! If the entire task I asked for is done, say exactly 'The task is done.' If you need some specific information (like username or password) say EXACTLY 'Please provide more information.' If it's impossible, say 'The task is impossible.' (If I haven't provided a task, say exactly 'Let me know what you'd like to do next.') Otherwise keep going.""",
force_task_completion_breakers=[
messages: list[dict[str, Any]] | None = None,
offline: bool = False,
auto_run: bool = False,
verbose: bool = False,
debug: bool = False,
max_output: int = 2800,
safe_mode: str = "off",
shrink_images: bool = False,
force_task_completion: bool = False,
force_task_completion_message: str = """Proceed. You CAN run code on my machine. If you want to run code, start your message with "```"! If the entire task I asked for is done, say exactly 'The task is done.' If you need some specific information (like username or password) say EXACTLY 'Please provide more information.' If it's impossible, say 'The task is impossible.' (If I haven't provided a task, say exactly 'Let me know what you'd like to do next.') Otherwise keep going.""",
force_task_completion_breakers: list[str] = [
"the task is done.",
"the task is impossible.",
"let me know what you'd like to do next.",
"please provide more information.",
],
disable_telemetry=os.getenv("DISABLE_TELEMETRY", "false").lower() == "true",
in_terminal_interface=False,
conversation_history=True,
conversation_filename=None,
conversation_history_path=get_storage_path("conversations"),
os=False,
speak_messages=False,
llm=None,
system_message=default_system_message,
custom_instructions="",
computer=None,
sync_computer=False,
import_computer_api=False,
skills_path=None,
import_skills=False,
multi_line=False,
disable_telemetry: bool = bool(os.getenv("DISABLE_TELEMETRY", "false").lower())
== True,
in_terminal_interface: bool = False,
conversation_history: bool = True,
conversation_filename: str | None = None,
conversation_history_path: str = get_storage_path("conversations"),
os: bool = False,
speak_messages: bool = False,
llm: Llm | None = None,
system_message: str = default_system_message,
custom_instructions: str = "",
computer: Computer | None = None,
sync_computer: bool = True,
import_computer_api: bool = False,
skills_path: str | None = None,
import_skills: bool = False,
multi_line: bool = False,
):
# State
self.messages = [] if messages is None else messages
self.messages: list[dict[str, Any]] = [] if messages is None else messages
self.responding = False
self.last_messages_count = 0

Expand Down Expand Up @@ -106,14 +108,14 @@ def __init__(
self.speak_messages = speak_messages

# LLM
self.llm = Llm(self) if llm is None else llm
self.llm: Llm = Llm(self) if llm is None else llm

# These are LLM related
self.system_message = system_message
self.custom_instructions = custom_instructions

# Computer
self.computer = Computer(self) if computer is None else computer
self.computer: Computer = Computer(self) if computer is None else computer
self.sync_computer = sync_computer
self.computer.import_computer_api = import_computer_api

Expand All @@ -136,7 +138,13 @@ def wait(self):
def anonymous_telemetry(self) -> bool:
return not self.disable_telemetry and not self.offline

def chat(self, message=None, display=True, stream=False, blocking=True):
def chat(
self,
message: str | None = None,
display: bool = True,
stream: bool = False,
blocking: bool = True,
):
try:
self.responding = True
if self.anonymous_telemetry:
Expand Down Expand Up @@ -186,7 +194,9 @@ def chat(self, message=None, display=True, stream=False, blocking=True):

raise

def _streaming_chat(self, message=None, display=True):
def _streaming_chat(
self, message: str | dict[str, Any] | None = None, display: bool = True
):
# Sometimes a little more code -> a much better experience!
# Display mode actually runs interpreter.chat(display=False, stream=True) from within the terminal_interface.
# wraps the vanilla .chat(display=False) generator in a display.
Expand Down Expand Up @@ -238,7 +248,9 @@ def _streaming_chat(self, message=None, display=True):
# If it's the first message, set the conversation name
if not self.conversation_filename:
first_few_words_list = self.messages[0]["content"][:25].split(" ")
if len(first_few_words_list) >= 2: # for languages like English with blank between words
if (
len(first_few_words_list) >= 2
): # for languages like English with blank between words
first_few_words = "_".join(first_few_words_list[:-1])
else: # for languages like Chinese without blank between words
first_few_words = self.messages[0]["content"][:15]
Expand All @@ -263,7 +275,7 @@ def _streaming_chat(self, message=None, display=True):
json.dump(self.messages, f)
return

raise Exception(
raise RuntimeError(
"`interpreter.chat()` requires a display. Set `display=True` or pass a message into `interpreter.chat(message)`."
)

Expand All @@ -274,7 +286,7 @@ def _respond_and_store(self):
"""

# Utility function
def is_active_line_chunk(chunk):
def is_active_line_chunk(chunk: dict[str, Any]) -> bool:
return "format" in chunk and chunk["format"] == "active_line"

last_flag_base = None
Expand Down Expand Up @@ -357,7 +369,7 @@ def reset(self):
self.messages = []
self.last_messages_count = 0

def display_message(self, markdown):
def display_message(self, markdown: str):
# This is just handy for start_script in profiles.
display_markdown_message(markdown)

Expand Down
9 changes: 6 additions & 3 deletions interpreter/core/llm/llm.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
from typing import Any

import litellm
import tokentrim as tt

from interpreter.core.core import OpenInterpreter

from ...terminal_interface.utils.display_markdown_message import (
display_markdown_message,
)
Expand All @@ -9,15 +13,14 @@
from .utils.convert_to_openai_messages import convert_to_openai_messages

litellm.suppress_debug_info = True
import time


class Llm:
"""
A stateless LMC-style LLM with some helpful properties.
"""

def __init__(self, interpreter):
def __init__(self, interpreter: OpenInterpreter):
# Store a reference to parent interpreter
self.interpreter = interpreter

Expand All @@ -40,7 +43,7 @@ def __init__(self, interpreter):
# Budget manager powered by LiteLLM
self.max_budget = None

def run(self, messages):
def run(self, messages: list[dict[str, Any]]):
"""
We're responsible for formatting the call into the llm.completions object,
starting with LMC messages in interpreter.messages, going to OpenAI compatible messages into the llm,
Expand Down
8 changes: 6 additions & 2 deletions interpreter/core/llm/run_function_calling_llm.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
from typing import Any

from interpreter.core.llm.llm import Llm

from .utils.merge_deltas import merge_deltas
from .utils.parse_partial_json import parse_partial_json

function_schema = {
function_schema: dict[str, Any] = {
"name": "execute",
"description": "Executes code on the user's machine **in the users local environment** and returns the output",
"parameters": {
Expand All @@ -21,7 +25,7 @@
}


def run_function_calling_llm(llm, request_params):
def run_function_calling_llm(llm: Llm, request_params: dict[str, Any]):
## Setup

# Add languages OI has access to
Expand Down
7 changes: 6 additions & 1 deletion interpreter/core/llm/run_text_llm.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
def run_text_llm(llm, params):
from typing import Any

from interpreter.core.llm.llm import Llm


def run_text_llm(llm: Llm, params: dict[str, Any]):
## Setup

try:
Expand Down
13 changes: 7 additions & 6 deletions interpreter/core/llm/utils/convert_to_openai_messages.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,22 @@
import base64
import io
import json
from typing import Any

from PIL import Image


def convert_to_openai_messages(
messages,
function_calling=True,
vision=False,
shrink_images=True,
code_output_sender="assistant",
messages: list[dict[str, Any]],
function_calling: bool = True,
vision: bool = False,
shrink_images: bool = True,
code_output_sender: str = "assistant",
):
"""
Converts LMC messages into OpenAI messages
"""
new_messages = []
new_messages: list[dict[str, Any]] = []

for message in messages:
# Is this for thine eyes?
Expand Down
5 changes: 4 additions & 1 deletion interpreter/core/llm/utils/merge_deltas.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
def merge_deltas(original, delta):
from typing import Any


def merge_deltas(original: dict[str, Any], delta):
"""
Pushes the delta into the original and returns that.

Expand Down
6 changes: 3 additions & 3 deletions interpreter/core/llm/utils/parse_partial_json.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import re


def parse_partial_json(s):
def parse_partial_json(s: str):
# Attempt to parse the string as-is.
try:
return json.loads(s)
Expand All @@ -11,7 +11,7 @@ def parse_partial_json(s):

# Initialize variables.
new_s = ""
stack = []
stack: list[str] = []
is_inside_string = False
escaped = False

Expand Down Expand Up @@ -55,6 +55,6 @@ def parse_partial_json(s):
# Attempt to parse the modified string as JSON.
try:
return json.loads(new_s)
except:
except Exception:
# If we still can't parse the string as JSON, return None to indicate failure.
return None
5 changes: 3 additions & 2 deletions interpreter/core/render_message.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import re
import time

from interpreter.core.core import OpenInterpreter

def render_message(interpreter, message):

def render_message(interpreter: OpenInterpreter, message: str):
"""
Renders a dynamic message into a string.
"""
Expand Down
9 changes: 4 additions & 5 deletions interpreter/core/respond.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@

import litellm

from interpreter.core.core import OpenInterpreter

from ..terminal_interface.utils.display_markdown_message import display_markdown_message
from .render_message import render_message


def respond(interpreter):
def respond(interpreter: OpenInterpreter):
"""
Yields chunks.
Responds until it decides not to run any more code or say anything else.
Expand Down Expand Up @@ -112,10 +114,7 @@ def respond(interpreter):
)
elif interpreter.offline and not interpreter.os:
print(traceback.format_exc())
raise Exception(
"Error occurred. "
+ str(e)
)
raise Exception("Error occurred. " + str(e))
else:
raise

Expand Down
8 changes: 5 additions & 3 deletions interpreter/core/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,13 @@
from typing import Generator

import uvicorn
from fastapi import Body, FastAPI, Request, Response, WebSocket
from fastapi.responses import PlainTextResponse, StreamingResponse
from fastapi import FastAPI, Request, Response, WebSocket
from fastapi.responses import PlainTextResponse

from interpreter.core.core import OpenInterpreter

def server(interpreter, host="0.0.0.0", port=8000):

def server(interpreter: OpenInterpreter, host: str = "0.0.0.0", port: int = 8000):
app = FastAPI()

@app.post("/chat")
Expand Down
3 changes: 2 additions & 1 deletion interpreter/core/utils/lazy_import.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import importlib.util
import sys

def lazy_import(name, optional=True):

def lazy_import(name: str, optional: bool = True):
"""Lazily import a module, specified by the name. Useful for optional packages, to speed up startup times."""
# Check if module is already imported
if name in sys.modules:
Expand Down