Adding New Models and Enhancing Provider Functionality
kqlio67 committed Feb 5, 2025 · commit 86d5ba2 (1 parent: 9ba2831)
Showing 8 changed files with 127 additions and 155 deletions; the hunks captured below are from the Blackbox provider.
```diff
@@ -15,7 +15,7 @@
 from ..image import to_data_uri
 from ..cookies import get_cookies_dir
 from .helper import format_prompt, format_image_prompt
-from ..providers.response import JsonConversation, ImageResponse, Reasoning
+from ..providers.response import JsonConversation, ImageResponse

 class Conversation(JsonConversation):
     validated_value: str = None
@@ -39,10 +39,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     default_vision_model = default_model
     default_image_model = 'ImageGeneration'
     image_models = [default_image_model]
-    vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'DeepSeek-V3', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
-    reasoning_models = ['DeepSeek-R1']
+    vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']

-    userSelectedModel = ['gpt-4o', 'o3-mini', 'claude-sonnet-3.5', 'gemini-pro', 'blackboxai-pro']
+    userSelectedModel = ['gpt-4o', 'o3-mini', 'gemini-pro', 'claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'blackboxai-pro', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO']

     agentMode = {
         'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
```
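The agentMode table shown above and the trendingAgentMode table in the next hunk map public model names to per-request mode payloads. A minimal sketch of how such a lookup could work; resolve_mode is a hypothetical helper, not part of the diff, and the table contents are abbreviated:

```python
# Hypothetical helper illustrating a lookup against the two mode tables.
# Entries are copied from the diff, but trimmed to a couple of rows.
agentMode = {
    'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
}
trendingAgentMode = {
    "o3-mini": {'mode': True, 'id': 'o3-mini'},
    "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
}

def resolve_mode(model: str) -> dict:
    """Return the agent-mode payload for a model, or an empty dict."""
    return agentMode.get(model) or trendingAgentMode.get(model, {})

print(resolve_mode("o3-mini"))        # {'mode': True, 'id': 'o3-mini'}
print(resolve_mode("llama-3.1-405b")) # {}
```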
```diff
@@ -56,6 +55,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     }

     trendingAgentMode = {
+        "o3-mini": {'mode': True, 'id': 'o3-mini'},
         "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
         "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
         'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
@@ -94,9 +94,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'builder Agent': {'mode': True, 'id': "builder Agent"},
     }

-    models = list(dict.fromkeys([default_model, *userSelectedModel, *reasoning_models, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
+    models = list(dict.fromkeys([default_model, *userSelectedModel, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))

     model_aliases = {
         "gpt-4": "gpt-4o",
         "claude-3.5-sonnet": "claude-sonnet-3.5",
+        "gemini-1.5-flash": "gemini-1.5-flash",
+        "gemini-1.5-pro": "gemini-pro",
         "deepseek-v3": "DeepSeek-V3",
```
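The rebuilt models list uses dict.fromkeys for order-preserving deduplication: dicts keep insertion order (Python 3.7+), so the first occurrence of each name wins, and removing *reasoning_models from the union is the only behavioral change. A small self-contained sketch of the pattern; the values below are abbreviated stand-ins for the class attributes, and default_model is an assumed value:

```python
# Order-preserving deduplication with dict.fromkeys, as used for `models`.
default_model = 'blackboxai'                             # assumed for illustration
userSelectedModel = ['gpt-4o', 'o3-mini', 'DeepSeek-V3'] # trimmed stand-in
image_models = ['ImageGeneration']
agentMode = {'DeepSeek-V3': {'mode': True, 'id': 'deepseek-chat'}}

# 'DeepSeek-V3' appears twice in the input but once in the output.
models = list(dict.fromkeys([default_model, *userSelectedModel, *image_models, *list(agentMode.keys())]))
print(models)
# ['blackboxai', 'gpt-4o', 'o3-mini', 'DeepSeek-V3', 'ImageGeneration']
```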
```diff
@@ -177,7 +179,6 @@ async def create_async_generator(
         messages: Messages,
         prompt: str = None,
         proxy: str = None,
-        web_search: bool = False,
         images: ImagesType = None,
         top_p: float = None,
         temperature: float = None,
```
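The final hunk pairs with the signature change above: webSearchMode is now hard-coded to False instead of taking the removed web_search parameter, and the request gains a synthetic session object whose expires field is the current UTC time in ISO-8601 form with a trailing Z. That expression is taken verbatim from the diff; here it is in isolation:

```python
from datetime import datetime, timezone

# UTC "now" with millisecond precision; isoformat() emits "+00:00",
# which is swapped for the "Z" suffix used in the API payload.
expires = datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z')
print(expires)  # e.g. 2025-02-05T12:34:56.789Z
```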
```diff
@@ -283,60 +284,20 @@
                 "vscodeClient": False,
                 "codeInterpreterMode": False,
                 "customProfile": {"name": "", "occupation": "", "traits": [], "additionalInfo": "", "enableNewChats": False},
-                "webSearchMode": web_search
+                "session": {"user":{"name":"John Doe","email":"[email protected]","image":"https://lh3.googleusercontent.com/a/ACg8ocK9X7mNpQ2vR4jH3tY8wL5nB1xM6fDS9JW2kLpTn4Vy3hR2xN4m=s96-c"},"expires":datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z'), "status": "PREMIUM"},
+                "webSearchMode": False
             }

             async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 await raise_for_status(response)
-                response_text = await response.text()
-                parts = response_text.split('$~~~$')
-                text_to_yield = parts[2] if len(parts) >= 3 else response_text
-
-                if not text_to_yield or text_to_yield.isspace():
-                    return
-
-                if model in cls.reasoning_models and "\n\n\n" in text_to_yield:
-                    think_split = text_to_yield.split("\n\n\n", 1)
-                    if len(think_split) > 1:
-                        think_content, answer = think_split[0].strip(), think_split[1].strip()
-                        yield Reasoning(status=think_content)
-                        yield answer
-                    else:
-                        yield text_to_yield
-                elif "<think>" in text_to_yield:
-                    pre_think, rest = text_to_yield.split('<think>', 1)
-                    think_content, post_think = rest.split('</think>', 1)
-
-                    pre_think = pre_think.strip()
-                    think_content = think_content.strip()
-                    post_think = post_think.strip()
-
-                    if pre_think:
-                        yield pre_think
-                    if think_content:
-                        yield Reasoning(status=think_content)
-                    if post_think:
-                        yield post_think
-
-                elif "Generated by BLACKBOX.AI" in text_to_yield:
-                    conversation.validated_value = await cls.fetch_validated(force_refresh=True)
-                    if conversation.validated_value:
-                        data["validated"] = conversation.validated_value
-                        async with session.post(cls.api_endpoint, json=data, proxy=proxy) as new_response:
-                            await raise_for_status(new_response)
-                            new_response_text = await new_response.text()
-                            new_parts = new_response_text.split('$~~~$')
-                            new_text = new_parts[2] if len(new_parts) >= 3 else new_response_text
-
-                            if new_text and not new_text.isspace():
-                                yield new_text
-                    else:
-                        if text_to_yield and not text_to_yield.isspace():
-                            yield text_to_yield
-                else:
-                    if text_to_yield and not text_to_yield.isspace():
-                        yield text_to_yield
+                full_response = []
+                async for chunk in response.content.iter_any():
+                    if chunk:
+                        chunk_text = chunk.decode()
+                        full_response.append(chunk_text)
+                        yield chunk_text

                 if return_conversation:
-                    conversation.message_history.append({"role": "assistant", "content": text_to_yield})
+                    full_response_text = ''.join(full_response)
+                    conversation.message_history.append({"role": "assistant", "content": full_response_text})
                     yield conversation
```
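With the $~~~$ splitting, <think> parsing, and validated-token retry removed, the response path reduces to streaming raw chunks and joining them for the conversation history. A self-contained sketch of that pattern with aiohttp; the endpoint URL and payload are placeholders, and aiohttp's built-in raise_for_status stands in for g4f's own helper of the same name:

```python
import asyncio
import aiohttp

async def stream_chat(url: str, payload: dict):
    """Yield response chunks as they arrive, mirroring the new code path."""
    full_response = []
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload) as response:
            response.raise_for_status()
            # iter_any() yields bytes as soon as any data is buffered;
            # decode() assumes chunks split on UTF-8 boundaries, as the
            # original code does.
            async for chunk in response.content.iter_any():
                if chunk:
                    chunk_text = chunk.decode()
                    full_response.append(chunk_text)
                    yield chunk_text
    # The joined text is what gets appended to message_history.
    print("history entry:", ''.join(full_response))

async def main():
    async for text in stream_chat("https://example.com/api/chat", {"messages": []}):
        print(text, end="")

# asyncio.run(main())  # placeholder endpoint; needs a real streaming API
```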