
Commit

Merge pull request #1965 from open-webui/dev
0.1.124
tjbck committed May 8, 2024
2 parents 30b0531 + b44ae53 commit b8d7fdf
Showing 106 changed files with 4,941 additions and 1,565 deletions.
19 changes: 10 additions & 9 deletions .github/dependabot.yml
@@ -1,11 +1,12 @@
version: 2
updates:
  - package-ecosystem: pip
    directory: "/backend"
    schedule:
      interval: daily
      time: "13:00"
    groups:
      python-packages:
        patterns:
          - "*"
  - package-ecosystem: pip
    directory: '/backend'
    schedule:
      interval: daily
      time: '13:00'
  - package-ecosystem: 'github-actions'
    directory: '/'
    schedule:
      # Check for updates to GitHub Actions every week
      interval: 'weekly'
1 change: 1 addition & 0 deletions .github/pull_request_template.md
@@ -1,5 +1,6 @@
## Pull Request Checklist

- [ ] **Target branch:** Pull requests should target the `dev` branch.
- [ ] **Description:** Briefly describe the changes in this pull request.
- [ ] **Changelog:** Ensure a changelog entry following the format of [Keep a Changelog](https://keepachangelog.com/) is added at the bottom of the PR description.
- [ ] **Documentation:** Have you updated relevant documentation [Open WebUI Docs](https://github.com/open-webui/docs), or other documentation sources?
11 changes: 10 additions & 1 deletion .github/workflows/integration-test.yml
@@ -20,7 +20,16 @@ jobs:

      - name: Build and run Compose Stack
        run: |
          docker compose up --detach --build
          docker compose --file docker-compose.yaml --file docker-compose.api.yaml up --detach --build
      - name: Wait for Ollama to be up
        timeout-minutes: 5
        run: |
          until curl --output /dev/null --silent --fail http://localhost:11434; do
            printf '.'
            sleep 1
          done
          echo "Service is up!"
      - name: Preload Ollama model
        run: |
22 changes: 22 additions & 0 deletions CHANGELOG.md
@@ -5,6 +5,28 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.1.124] - 2024-05-08

### Added

- **🖼️ Improved Chat Sidebar**: Now conveniently displays time ranges and organizes chats by today, yesterday, and more.
- **📜 Citations in RAG Feature**: Easily track the context fed to the LLM with added citations in the RAG feature.
- **🔒 Auth Disable Option**: Introducing the ability to disable authentication. Set 'WEBUI_AUTH' to False to disable authentication. Note: Only applicable for fresh installations without existing users.
- **📹 Enhanced YouTube RAG Pipeline**: Now supports non-English videos for an enriched experience.
- **🔊 Specify OpenAI TTS Models**: Customize your TTS experience by specifying OpenAI TTS models.
- **🔧 Additional Environment Variables**: Discover more environment variables in our comprehensive documentation at Open WebUI Documentation (https://docs.openwebui.com).
- **🌐 Language Support**: Arabic, Finnish, and Hindi added; Improved support for German, Vietnamese, and Chinese.

### Fixed

- **🛠️ Model Selector Styling**: Addressed styling issues for improved user experience.
- **⚠️ Warning Messages**: Resolved backend warning messages.

### Changed

- **📝 Title Generation**: Limited output to 50 tokens.
- **📦 Helm Charts**: Removed Helm charts, now available in a separate repository (https://github.com/open-webui/helm-charts).

## [0.1.123] - 2024-05-02

### Added
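A usage sketch for the WEBUI_AUTH option described in the changelog above, assuming a standard Docker deployment of Open WebUI (the port mapping, volume, and container name below are illustrative assumptions, not taken from this commit). Per the changelog note, this only applies to fresh installations with no existing users:

    # Start a fresh instance with authentication disabled
    docker run -d -p 3000:8080 \
      -e WEBUI_AUTH=False \
      -v open-webui:/app/backend/data \
      --name open-webui \
      ghcr.io/open-webui/open-webui:main
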
2 changes: 1 addition & 1 deletion README.md
@@ -152,7 +152,7 @@ We offer various installation alternatives, including non-Docker methods, Docker

### Troubleshooting

Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s).
Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s).

### Keeping Your Docker Installation Up-to-Date

12 changes: 12 additions & 0 deletions backend/apps/audio/main.py
@@ -43,6 +43,8 @@
    DEVICE_TYPE,
    AUDIO_OPENAI_API_BASE_URL,
    AUDIO_OPENAI_API_KEY,
    AUDIO_OPENAI_API_MODEL,
    AUDIO_OPENAI_API_VOICE,
)

log = logging.getLogger(__name__)
@@ -60,6 +62,8 @@

app.state.OPENAI_API_BASE_URL = AUDIO_OPENAI_API_BASE_URL
app.state.OPENAI_API_KEY = AUDIO_OPENAI_API_KEY
app.state.OPENAI_API_MODEL = AUDIO_OPENAI_API_MODEL
app.state.OPENAI_API_VOICE = AUDIO_OPENAI_API_VOICE

# setting device type for whisper model
whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
@@ -72,13 +76,17 @@
class OpenAIConfigUpdateForm(BaseModel):
    url: str
    key: str
    model: str
    speaker: str


@app.get("/config")
async def get_openai_config(user=Depends(get_admin_user)):
    return {
        "OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL,
        "OPENAI_API_KEY": app.state.OPENAI_API_KEY,
        "OPENAI_API_MODEL": app.state.OPENAI_API_MODEL,
        "OPENAI_API_VOICE": app.state.OPENAI_API_VOICE,
    }


@@ -91,11 +99,15 @@ async def update_openai_config(

    app.state.OPENAI_API_BASE_URL = form_data.url
    app.state.OPENAI_API_KEY = form_data.key
    app.state.OPENAI_API_MODEL = form_data.model
    app.state.OPENAI_API_VOICE = form_data.speaker

    return {
        "status": True,
        "OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL,
        "OPENAI_API_KEY": app.state.OPENAI_API_KEY,
        "OPENAI_API_MODEL": app.state.OPENAI_API_MODEL,
        "OPENAI_API_VOICE": app.state.OPENAI_API_VOICE,
    }


4 changes: 4 additions & 0 deletions backend/apps/litellm/main.py
@@ -36,6 +36,10 @@
    LITELLM_PROXY_HOST,
)

import warnings

warnings.simplefilter("ignore")

from litellm.utils import get_llm_provider

import asyncio
93 changes: 85 additions & 8 deletions backend/apps/ollama/main.py
@@ -25,13 +25,19 @@
import aiohttp
import asyncio
import logging
import time
from urllib.parse import urlparse
from typing import Optional, List, Union


from apps.web.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import decode_token, get_current_user, get_admin_user
from utils.utils import (
    decode_token,
    get_current_user,
    get_verified_user,
    get_admin_user,
)


from config import (
@@ -164,7 +170,7 @@ async def get_all_models():
@app.get("/api/tags")
@app.get("/api/tags/{url_idx}")
async def get_ollama_tags(
    url_idx: Optional[int] = None, user=Depends(get_current_user)
    url_idx: Optional[int] = None, user=Depends(get_verified_user)
):
    if url_idx == None:
        models = await get_all_models()
@@ -563,7 +569,7 @@ async def delete_model(


@app.post("/api/show")
async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_user)):
async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_user)):
    if form_data.name not in app.state.MODELS:
        raise HTTPException(
            status_code=400,
@@ -612,7 +618,7 @@ class GenerateEmbeddingsForm(BaseModel):
async def generate_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
    user=Depends(get_verified_user),
):
    if url_idx == None:
        model = form_data.model
@@ -730,7 +736,7 @@ class GenerateCompletionForm(BaseModel):
async def generate_completion(
    form_data: GenerateCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
    user=Depends(get_verified_user),
):

    if url_idx == None:
@@ -833,7 +839,7 @@ class GenerateChatCompletionForm(BaseModel):
async def generate_chat_completion(
    form_data: GenerateChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
    user=Depends(get_verified_user),
):

    if url_idx == None:
@@ -942,7 +948,7 @@ class OpenAIChatCompletionForm(BaseModel):
async def generate_openai_chat_completion(
    form_data: OpenAIChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
    user=Depends(get_verified_user),
):

    if url_idx == None:
@@ -1026,6 +1032,75 @@ def stream_content():
)


@app.get("/v1/models")
@app.get("/v1/models/{url_idx}")
async def get_openai_models(
url_idx: Optional[int] = None,
user=Depends(get_verified_user),
):
if url_idx == None:
models = await get_all_models()

if app.state.ENABLE_MODEL_FILTER:
if user.role == "user":
models["models"] = list(
filter(
lambda model: model["name"] in app.state.MODEL_FILTER_LIST,
models["models"],
)
)

return {
"data": [
{
"id": model["model"],
"object": "model",
"created": int(time.time()),
"owned_by": "openai",
}
for model in models["models"]
],
"object": "list",
}

else:
url = app.state.OLLAMA_BASE_URLS[url_idx]
try:
r = requests.request(method="GET", url=f"{url}/api/tags")
r.raise_for_status()

models = r.json()

return {
"data": [
{
"id": model["model"],
"object": "model",
"created": int(time.time()),
"owned_by": "openai",
}
for model in models["models"]
],
"object": "list",
}

except Exception as e:
log.exception(e)
error_detail = "Open WebUI: Server Connection Error"
if r is not None:
try:
res = r.json()
if "error" in res:
error_detail = f"Ollama: {res['error']}"
except:
error_detail = f"Ollama: {e}"

raise HTTPException(
status_code=r.status_code if r else 500,
detail=error_detail,
)


class UrlForm(BaseModel):
    url: str

@@ -1241,7 +1316,9 @@ def file_process_stream():


@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def deprecated_proxy(path: str, request: Request, user=Depends(get_current_user)):
async def deprecated_proxy(
    path: str, request: Request, user=Depends(get_verified_user)
):
    url = app.state.OLLAMA_BASE_URLS[0]
    target_url = f"{url}/{path}"

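A usage sketch for the new OpenAI-compatible /v1/models route added above, assuming the Ollama router is mounted at /ollama, the backend listens on localhost:8080, and a bearer token for a verified user is available in $OPEN_WEBUI_TOKEN (all three are assumptions; none are shown in this diff):

    # List models in OpenAI list format via the new route
    curl --silent \
      --header "Authorization: Bearer $OPEN_WEBUI_TOKEN" \
      http://localhost:8080/ollama/v1/models
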

