Merge pull request open-webui#7475 from open-webui/dev
0.4.7
tjbck authored Dec 1, 2024
2 parents 0a26c41 + 19bcda2 commit c4ea313
Showing 87 changed files with 3,336 additions and 1,106 deletions.
18 changes: 18 additions & 0 deletions CHANGELOG.md
@@ -5,6 +5,24 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.4.7] - 2024-12-01

### Added

- **✨ Prompt Input Auto-Completion**: Type a prompt and let AI intelligently suggest and complete your inputs. Simply press 'Tab' or swipe right on mobile to confirm. Available only with Rich Text Input (default setting). Disable via Admin Settings for full control.
- **🌍 Improved Translations**: Enhanced localization for multiple languages, ensuring a more polished and accessible experience for international users.

### Fixed

- **🛠️ Tools Export Issue**: Resolved a critical issue where exporting tools wasn’t functioning, restoring seamless export capabilities.
- **🔗 Model ID Registration**: Fixed an issue where model IDs weren’t registering correctly in the model editor, ensuring reliable model setup and tracking.
- **🖋️ Textarea Auto-Expansion**: Corrected a bug where textareas didn’t expand automatically on certain browsers, improving usability for multi-line inputs.
- **🔧 Ollama Embed Endpoint**: Addressed the /ollama/embed endpoint malfunction, ensuring consistent performance and functionality.

### Changed

- **🎨 Knowledge Base Styling**: Refined knowledge base visuals for a cleaner, more modern look, laying the groundwork for further enhancements in upcoming releases.

## [0.4.6] - 2024-11-26

### Added
2 changes: 1 addition & 1 deletion backend/open_webui/apps/ollama/main.py
@@ -706,7 +706,7 @@ async def generate_embeddings(
url_idx: Optional[int] = None,
user=Depends(get_verified_user),
):
-return generate_ollama_batch_embeddings(form_data, url_idx)
+return await generate_ollama_batch_embeddings(form_data, url_idx)


@app.post("/api/embeddings")
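The one-line fix above awaits `generate_ollama_batch_embeddings`, which is a coroutine function: without `await`, the endpoint returns the coroutine object itself rather than the embeddings payload. A minimal standalone sketch of that failure mode (stand-in function names, not the Open WebUI implementation):

```python
import asyncio

async def generate_batch_embeddings(texts: list[str]) -> dict:
    # Stand-in for the real Ollama embeddings call; assume it returns a JSON-like dict.
    return {"embeddings": [[0.0, 0.0, 0.0] for _ in texts]}

async def endpoint_without_await(texts: list[str]):
    # Bug: returns a coroutine object, which the caller would then try to serialize.
    return generate_batch_embeddings(texts)

async def endpoint_with_await(texts: list[str]):
    # Fix: await the coroutine so the actual response dict is returned.
    return await generate_batch_embeddings(texts)

async def main():
    broken = await endpoint_without_await(["hello"])
    fixed = await endpoint_with_await(["hello"])
    print(type(broken).__name__)  # coroutine
    print(type(fixed).__name__)   # dict
    broken.close()                # avoid the "coroutine was never awaited" warning

asyncio.run(main())
```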
23 changes: 21 additions & 2 deletions backend/open_webui/apps/retrieval/loaders/youtube.py
@@ -1,7 +1,12 @@
import logging

from typing import Any, Dict, Generator, List, Optional, Sequence, Union
from urllib.parse import parse_qs, urlparse
from langchain_core.documents import Document
from open_webui.env import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])

ALLOWED_SCHEMES = {"http", "https"}
ALLOWED_NETLOCS = {
@@ -51,12 +56,14 @@ def __init__(
self,
video_id: str,
language: Union[str, Sequence[str]] = "en",
proxy_url: Optional[str] = None,
):
"""Initialize with YouTube video ID."""
_video_id = _parse_video_id(video_id)
self.video_id = _video_id if _video_id is not None else video_id
self._metadata = {"source": video_id}
self.language = language
self.proxy_url = proxy_url
if isinstance(language, str):
self.language = [language]
else:
@@ -76,10 +83,22 @@ def load(self) -> List[Document]:
"Please install it with `pip install youtube-transcript-api`."
)

if self.proxy_url:
youtube_proxies = {
"http": self.proxy_url,
"https": self.proxy_url,
}
# Don't log complete URL because it might contain secrets
log.debug(f"Using proxy URL: {self.proxy_url[:14]}...")
else:
youtube_proxies = None

try:
-transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
+transcript_list = YouTubeTranscriptApi.list_transcripts(
+    self.video_id, proxies=youtube_proxies
+)
except Exception as e:
-print(e)
+log.exception("Loading YouTube transcript failed")
return []

try:
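With the change above, `YoutubeLoader` accepts an optional `proxy_url`, forwards it to `YouTubeTranscriptApi.list_transcripts` as both the `http` and `https` proxy, and logs only a truncated prefix of the URL. A hedged usage sketch, with placeholder video URL and proxy address:

```python
from open_webui.apps.retrieval.loaders.youtube import YoutubeLoader

# Placeholder values for illustration only.
loader = YoutubeLoader(
    "https://www.youtube.com/watch?v=dQw4w9WgXcQ",   # parsed down to the video ID
    language=["en", "de"],                            # transcript languages to try
    proxy_url="http://127.0.0.1:3128",                # forwarded as http/https proxies
)

docs = loader.load()   # returns [] when the transcript fetch fails (see the except branch above)
for doc in docs:
    print(doc.metadata, doc.page_content[:80])
```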
14 changes: 11 additions & 3 deletions backend/open_webui/apps/retrieval/main.py
@@ -105,6 +105,7 @@
TIKA_SERVER_URL,
UPLOAD_DIR,
YOUTUBE_LOADER_LANGUAGE,
YOUTUBE_LOADER_PROXY_URL,
DEFAULT_LOCALE,
AppConfig,
)
@@ -171,6 +172,7 @@
app.state.config.PDF_EXTRACT_IMAGES = PDF_EXTRACT_IMAGES

app.state.config.YOUTUBE_LOADER_LANGUAGE = YOUTUBE_LOADER_LANGUAGE
app.state.config.YOUTUBE_LOADER_PROXY_URL = YOUTUBE_LOADER_PROXY_URL
app.state.YOUTUBE_LOADER_TRANSLATION = None


@@ -471,6 +473,7 @@ async def get_rag_config(user=Depends(get_admin_user)):
"youtube": {
"language": app.state.config.YOUTUBE_LOADER_LANGUAGE,
"translation": app.state.YOUTUBE_LOADER_TRANSLATION,
"proxy_url": app.state.config.YOUTUBE_LOADER_PROXY_URL,
},
"web": {
"web_loader_ssl_verification": app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION,
@@ -518,6 +521,7 @@ class ChunkParamUpdateForm(BaseModel):
class YoutubeLoaderConfig(BaseModel):
language: list[str]
translation: Optional[str] = None
proxy_url: str = ""


class WebSearchConfig(BaseModel):
@@ -580,6 +584,7 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_

if form_data.youtube is not None:
app.state.config.YOUTUBE_LOADER_LANGUAGE = form_data.youtube.language
app.state.config.YOUTUBE_LOADER_PROXY_URL = form_data.youtube.proxy_url
app.state.YOUTUBE_LOADER_TRANSLATION = form_data.youtube.translation

if form_data.web is not None:
@@ -640,6 +645,7 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
},
"youtube": {
"language": app.state.config.YOUTUBE_LOADER_LANGUAGE,
"proxy_url": app.state.config.YOUTUBE_LOADER_PROXY_URL,
"translation": app.state.YOUTUBE_LOADER_TRANSLATION,
},
"web": {
@@ -867,7 +873,7 @@ def save_docs_to_vector_db(
return True
except Exception as e:
log.exception(e)
-return False
+raise e


class ProcessFileForm(BaseModel):
Expand Down Expand Up @@ -897,7 +903,7 @@ def process_file(

docs = [
Document(
-page_content=form_data.content,
+page_content=form_data.content.replace("<br/>", "\n"),
metadata={
**file.meta,
"name": file.filename,
@@ -1081,7 +1087,9 @@ def process_youtube_video(form_data: ProcessUrlForm, user=Depends(get_verified_u
collection_name = calculate_sha256_string(form_data.url)[:63]

loader = YoutubeLoader(
-form_data.url, language=app.state.config.YOUTUBE_LOADER_LANGUAGE
+form_data.url,
+language=app.state.config.YOUTUBE_LOADER_LANGUAGE,
+proxy_url=app.state.config.YOUTUBE_LOADER_PROXY_URL,
)

docs = loader.load()
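The retrieval app now persists `YOUTUBE_LOADER_PROXY_URL`, surfaces it in the RAG config read/update handlers, and passes it to `YoutubeLoader` in `process_youtube_video`. The new field rides on the `YoutubeLoaderConfig` model shown above; a small sketch of the payload shape (the proxy value is a placeholder):

```python
from typing import Optional
from pydantic import BaseModel

# Copy of the YoutubeLoaderConfig fields shown in this diff, reproduced standalone.
class YoutubeLoaderConfig(BaseModel):
    language: list[str]
    translation: Optional[str] = None
    proxy_url: str = ""

# Example "youtube" section of a config update; the proxy address is a placeholder.
cfg = YoutubeLoaderConfig(language=["en"], proxy_url="http://proxy.internal:8080")
print(cfg.model_dump())
# {'language': ['en'], 'translation': None, 'proxy_url': 'http://proxy.internal:8080'}
```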
10 changes: 7 additions & 3 deletions backend/open_webui/apps/webui/models/tools.py
@@ -76,6 +76,10 @@ class ToolModel(BaseModel):
####################


class ToolUserModel(ToolModel):
user: Optional[UserResponse] = None


class ToolResponse(BaseModel):
id: str
user_id: str
@@ -138,13 +142,13 @@ def get_tool_by_id(self, id: str) -> Optional[ToolModel]:
except Exception:
return None

-def get_tools(self) -> list[ToolUserResponse]:
+def get_tools(self) -> list[ToolUserModel]:
with get_db() as db:
tools = []
for tool in db.query(Tool).order_by(Tool.updated_at.desc()).all():
user = Users.get_user_by_id(tool.user_id)
tools.append(
-ToolUserResponse.model_validate(
+ToolUserModel.model_validate(
{
**ToolModel.model_validate(tool).model_dump(),
"user": user.model_dump() if user else None,
@@ -155,7 +159,7 @@ def get_tools(self) -> list[ToolUserResponse]:

def get_tools_by_user_id(
self, user_id: str, permission: str = "write"
-) -> list[ToolUserResponse]:
+) -> list[ToolUserModel]:
tools = self.get_tools()

return [
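The change above validates each row into the new `ToolUserModel`, which inherits every `ToolModel` field and adds the optional `user`. A simplified, hypothetical sketch (stand-in models, not the real Open WebUI schemas) of why validating into a narrower response model can drop fields a caller such as a tools export path may need:

```python
from typing import Optional
from pydantic import BaseModel

# Simplified stand-ins for illustration; the real models carry more fields.
class ToolModel(BaseModel):
    id: str
    content: str                      # e.g. tool source, needed when exporting

class ToolUserModel(ToolModel):       # inherits all ToolModel fields, adds `user`
    user: Optional[dict] = None

class NarrowResponse(BaseModel):      # hypothetical response-only shape
    id: str
    user: Optional[dict] = None

row = {"id": "t1", "content": "def tool(): ...", "user": {"name": "alice"}}

print(ToolUserModel.model_validate(row).model_dump())
# {'id': 't1', 'content': 'def tool(): ...', 'user': {'name': 'alice'}}
print(NarrowResponse.model_validate(row).model_dump())
# {'id': 't1', 'user': {'name': 'alice'}}  -- extra fields are silently dropped
```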
72 changes: 72 additions & 0 deletions backend/open_webui/config.py
@@ -583,6 +583,12 @@ def load_oauth_providers():
)

OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
if OLLAMA_BASE_URL:
# Remove trailing slash
OLLAMA_BASE_URL = (
OLLAMA_BASE_URL[:-1] if OLLAMA_BASE_URL.endswith("/") else OLLAMA_BASE_URL
)


K8S_FLAG = os.environ.get("K8S_FLAG", "")
USE_OLLAMA_DOCKER = os.environ.get("USE_OLLAMA_DOCKER", "false")
@@ -998,6 +1004,66 @@ class BannerModel(BaseModel):
</chat_history>
"""

ENABLE_AUTOCOMPLETE_GENERATION = PersistentConfig(
"ENABLE_AUTOCOMPLETE_GENERATION",
"task.autocomplete.enable",
os.environ.get("ENABLE_AUTOCOMPLETE_GENERATION", "True").lower() == "true",
)

AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH = PersistentConfig(
"AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH",
"task.autocomplete.input_max_length",
int(os.environ.get("AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH", "-1")),
)

AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE = PersistentConfig(
"AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE",
"task.autocomplete.prompt_template",
os.environ.get("AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE", ""),
)


DEFAULT_AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE = """### Task:
You are an autocompletion system. Continue the text in `<text>` based on the **completion type** in `<type>` and the given language.
### **Instructions**:
1. Analyze `<text>` for context and meaning.
2. Use `<type>` to guide your output:
- **General**: Provide a natural, concise continuation.
- **Search Query**: Complete as if generating a realistic search query.
3. Start as if you are directly continuing `<text>`. Do **not** repeat, paraphrase, or respond as a model. Simply complete the text.
4. Ensure the continuation:
- Flows naturally from `<text>`.
- Avoids repetition, overexplaining, or unrelated ideas.
5. If unsure, return: `{ "text": "" }`.
### **Output Rules**:
- Respond only in JSON format: `{ "text": "<your_completion>" }`.
### **Examples**:
#### Example 1:
Input:
<type>General</type>
<text>The sun was setting over the horizon, painting the sky</text>
Output:
{ "text": "with vibrant shades of orange and pink." }
#### Example 2:
Input:
<type>Search Query</type>
<text>Top-rated restaurants in</text>
Output:
{ "text": "New York City for Italian cuisine." }
---
### Context:
<chat_history>
{{MESSAGES:END:6}}
</chat_history>
<type>{{TYPE}}</type>
<text>{{PROMPT}}</text>
#### Output:
"""

TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = PersistentConfig(
"TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE",
@@ -1259,6 +1325,12 @@ class BannerModel(BaseModel):
os.getenv("YOUTUBE_LOADER_LANGUAGE", "en").split(","),
)

YOUTUBE_LOADER_PROXY_URL = PersistentConfig(
"YOUTUBE_LOADER_PROXY_URL",
"rag.youtube_loader_proxy_url",
os.getenv("YOUTUBE_LOADER_PROXY_URL", ""),
)


ENABLE_RAG_WEB_SEARCH = PersistentConfig(
"ENABLE_RAG_WEB_SEARCH",
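Two patterns recur in this config hunk: normalizing `OLLAMA_BASE_URL` by dropping a single trailing slash, and reading the new autocomplete settings from environment variables (a lowercase string compare for the boolean, an `int` cast with a `-1` default, presumably meaning no input length cap). A standalone sketch of the same parsing, outside the `PersistentConfig` wrapper; the demo default URL is a placeholder (the real code defaults to an empty string):

```python
import os

# Trailing-slash normalization, as added for OLLAMA_BASE_URL.
ollama_base_url = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434/")
if ollama_base_url:
    ollama_base_url = ollama_base_url[:-1] if ollama_base_url.endswith("/") else ollama_base_url

# Boolean flag: enabled unless the env var is set to something other than "true".
enable_autocomplete = os.environ.get("ENABLE_AUTOCOMPLETE_GENERATION", "True").lower() == "true"

# Integer with a -1 default (assumed to mean "no input length limit").
input_max_length = int(os.environ.get("AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH", "-1"))

print(ollama_base_url, enable_autocomplete, input_max_length)
# http://localhost:11434 True -1   (when none of the variables are set)
```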
1 change: 1 addition & 0 deletions backend/open_webui/constants.py
@@ -113,5 +113,6 @@ def __str__(self) -> str:
TAGS_GENERATION = "tags_generation"
EMOJI_GENERATION = "emoji_generation"
QUERY_GENERATION = "query_generation"
AUTOCOMPLETE_GENERATION = "autocomplete_generation"
FUNCTION_CALLING = "function_calling"
MOA_RESPONSE_GENERATION = "moa_response_generation"
4 changes: 2 additions & 2 deletions backend/open_webui/env.py
@@ -373,7 +373,7 @@ def parse_section(section):
AIOHTTP_CLIENT_TIMEOUT = 300

AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST = os.environ.get(
"AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST", "3"
"AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST", "5"
)

if AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST == "":
@@ -384,7 +384,7 @@ def parse_section(section):
AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST
)
except Exception:
-AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST = 3
+AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST = 5

####################################
# OFFLINE_MODE
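The env.py change raises the default OpenAI model-list timeout from 3 to 5 seconds, both in the environment default and in the fallback used when the value fails to parse. A sketch of the resulting parsing logic; the empty-string branch is an assumption (that assignment sits outside the shown hunk):

```python
import os

raw = os.environ.get("AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST", "5")

if raw == "":
    timeout = None   # assumption: empty string disables the timeout (not shown in this hunk)
else:
    try:
        timeout = int(raw)
    except Exception:
        timeout = 5   # fallback now matches the new 5-second default

print(timeout)  # 5 when the variable is unset
```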
