diff --git a/app/lib/models/ai.dart b/app/lib/models/ai.dart
index 310d672..8082952 100644
--- a/app/lib/models/ai.dart
+++ b/app/lib/models/ai.dart
@@ -1,8 +1,8 @@
 import 'dart:math';
 
+import 'package:glowby/services/network.dart';
 import 'package:glowby/services/pulze_ai_api.dart';
-import '../services/openai_api.dart';
 import '../utils/timestamp.dart';
 import '../views/widgets/message.dart';
 import 'package:async/async.dart';
 
@@ -37,19 +37,11 @@ class Ai {
     }
 
     // Call the OpenAI API if no matching questions are found locally
-    if (aiEnabled && OpenAiApi.oat().isNotEmpty) {
-      networkOperation = OpenAiApi.getResponseFromOpenAI(message,
+    if (aiEnabled && PulzeAiApi.oat().isNotEmpty) {
+      networkOperation = Network.getResponseFromPulze(message,
           previousMessages: previousMessages);
       String response = await networkOperation!.value;
-      String poweredTitle = OpenAiApi.model == 'gpt-4'
-          ? 'Powered by GPT-4'
-          : OpenAiApi.model == 'gpt-3.5-turbo'
-              ? 'Powered by GPT-3.5'
-              : OpenAiApi.model == 'gpt-4-1106-preview'
-                  ? 'Powered by GPT-4 Turbo'
-                  : OpenAiApi.model == 'pulzeai'
-                      ? PulzeAiApi.lastUsedModel()
-                      : '';
+      String poweredTitle = PulzeAiApi.lastUsedModel();
       return [
         Message(
           text: response,
diff --git a/app/lib/services/network.dart b/app/lib/services/network.dart
new file mode 100644
index 0000000..9a04455
--- /dev/null
+++ b/app/lib/services/network.dart
@@ -0,0 +1,100 @@
+import 'package:flutter_secure_storage/flutter_secure_storage.dart';
+import 'package:async/async.dart';
+import 'package:glowby/services/pulze_ai_api.dart';
+
+class Network {
+  static final Network _instance = Network._privateConstructor();
+  factory Network() => _instance;
+  Network._privateConstructor();
+
+  static String defaultSystemPrompt =
+      'You are Glowby, super helpful, nice, and humorous AI assistant ready to help with anything. I like to joke around.';
+
+  static String defaultSystemPromptComplexTask =
+      'You are Glowby, an AI assistant designed to break down complex tasks into a manageable 5-step plan. For each step, you offer the user 3 options to choose from. Once the user selects an option, you proceed to the next step based on their choice. After the user has chosen an option for the fifth step, you provide them with a customized, actionable plan based on their previous responses. You only reveal the current step and options to ensure an engaging, interactive experience.';
+  static String model = 'gpt-3.5-turbo'; //'gpt-4';
+  static String selectedLanguage = 'en-US';
+  static String systemPrompt = defaultSystemPrompt;
+  static const String _modelKey = 'openai_model';
+  static const String _selectedLanguageKey = 'selected_language';
+  static const String _systemPromptKey = 'openai_system_prompt';
+  static const FlutterSecureStorage _secureStorage = FlutterSecureStorage();
+
+  static Future<void> setModel(String value) async {
+    model = value;
+    await _secureStorage.write(key: _modelKey, value: model);
+  }
+
+  static Future<void> setSystemPrompt(String value) async {
+    systemPrompt = value;
+    await _secureStorage.write(key: _systemPromptKey, value: systemPrompt);
+  }
+
+  static Future<void> setSelectedLanguage(String value) async {
+    selectedLanguage = value;
+    await _secureStorage.write(
+        key: _selectedLanguageKey, value: selectedLanguage);
+  }
+
+  static CancelableOperation<String?> getResponseFromPulze(
+    String message, {
+    List<Map<String, String>> previousMessages = const [],
+    int maxTries = 1,
+    String? customSystemPrompt,
+  }) {
+    // Create a cancelable completer
+    final completer = CancelableCompleter<String?>();
+
+    _getResponseFromPulzeAI(
+      message,
+      completer,
+      previousMessages: previousMessages,
+    );
+
+    return completer.operation;
+  }
+
+  static String formatPrevMessages(
+      List<Map<String, String>> previousMessages) {
+    return previousMessages.map((message) {
+      return "${message['role']}: ${message['content']}";
+    }).join(', ');
+  }
+
+  static Future<void> _getResponseFromPulzeAI(
+    String message,
+    CancelableCompleter<String?> completer, {
+    List<Map<String, String>> previousMessages = const [],
+  }) async {
+    String? finalResponse = '';
+
+    if (PulzeAiApi.oat() != '') {
+      //print(previousMessages);
+      String formattedPrevMessages = formatPrevMessages(previousMessages);
+      if (previousMessages.isNotEmpty && PulzeAiApi.sendMessages()) {
+        finalResponse = await PulzeAiApi.generate(
+            '$message previousMessages: $formattedPrevMessages');
+      } else {
+        finalResponse = await PulzeAiApi.generate(message);
+      }
+
+      //print('finalResponse: $finalResponse');
+      if (finalResponse != null) {
+        finalResponse = finalResponse
+            .replaceAll('assistant: ', '')
+            .replaceAll('previousMessages: ', '')
+            .replaceAll('user: ', '')
+            .replaceAll('[System message]: ', '');
+      }
+    } else {
+      finalResponse =
+          'Please enter your Pulze AI Access Token in the settings.';
+    }
+
+    completer.complete(finalResponse);
+
+    // Explicitly return null to avoid returning a value from this method.
+    return;
+  }
+}
diff --git a/app/lib/services/openai_api.dart b/app/lib/services/openai_api.dart
deleted file mode 100644
index d9dbd75..0000000
--- a/app/lib/services/openai_api.dart
+++ /dev/null
@@ -1,472 +0,0 @@
-import 'dart:convert';
-import 'package:flutter/foundation.dart';
-import 'package:flutter_secure_storage/flutter_secure_storage.dart';
-import 'package:http/http.dart' as http;
-import 'package:async/async.dart';
-import 'package:glowby/services/pulze_ai_api.dart';
-
-class OpenAiApi {
-  static final OpenAiApi _instance = OpenAiApi._privateConstructor();
-  factory OpenAiApi() => _instance;
-  OpenAiApi._privateConstructor();
-
-  String _apiKey = '';
-  static int _totalTokensUsed = 0;
-
-  static String oat() => OpenAiApi()._oat();
-  static void setOat(String value) => OpenAiApi()._setOat(value);
-  static void resetOat() => OpenAiApi()._resetOat();
-
-  void _resetOat() {
-    _apiKey = '';
-  }
-
-  String _oat() => _apiKey;
-  Future<void> _setOat(String value) async {
-    _apiKey = value;
-    await _secureStorage.write(key: _apiKeyKey, value: _apiKey);
-  }
-
-  static String defaultSystemPrompt =
-      'You are Glowby, super helpful, nice, and humorous AI assistant ready to help with anything. I like to joke around.';
-
-  static String defaultSystemPromptComplexTask =
-      'You are Glowby, an AI assistant designed to break down complex tasks into a manageable 5-step plan. For each step, you offer the user 3 options to choose from. Once the user selects an option, you proceed to the next step based on their choice. After the user has chosen an option for the fifth step, you provide them with a customized, actionable plan based on their previous responses.
You only reveal the current step and options to ensure an engaging, interactive experience.'; - static String model = 'gpt-3.5-turbo'; //'gpt-4'; - static String selectedLanguage = 'en-US'; - static String systemPrompt = defaultSystemPrompt; - static const String _apiKeyKey = 'openai_api_key'; - static const String _modelKey = 'openai_model'; - static const String _selectedLanguageKey = 'selected_language'; - static const String _systemPromptKey = 'openai_system_prompt'; - static const FlutterSecureStorage _secureStorage = FlutterSecureStorage(); - - static Future loadOat() async { - try { - setOat(await _secureStorage.read(key: _apiKeyKey) ?? ''); - model = (await _secureStorage.read(key: _modelKey)) ?? 'gpt-3.5-turbo'; - selectedLanguage = - (await _secureStorage.read(key: _selectedLanguageKey)) ?? 'en-US'; - systemPrompt = (await _secureStorage.read(key: _systemPromptKey)) ?? - defaultSystemPrompt; - } catch (e) { - if (kDebugMode) { - print('Error loading OAT: $e'); - } - } - - await PulzeAiApi.loadOat(); - } - - static Future setModel(String value) async { - model = value; - await _secureStorage.write(key: _modelKey, value: model); - } - - static Future setSystemPrompt(String value) async { - systemPrompt = value; - await _secureStorage.write(key: _systemPromptKey, value: systemPrompt); - } - - static Future setSelectedLanguage(String value) async { - selectedLanguage = value; - await _secureStorage.write( - key: _selectedLanguageKey, value: selectedLanguage); - } - - static Future generateImageUrl(String description) async { - // Check if the description is safe - /*bool descriptionIsSafe = await isInputSafe(description); - if (!descriptionIsSafe) { - throw Exception( - 'The input provided is not considered safe. Please provide a different input.'); - }*/ - - final apiKey = OpenAiApi.oat(); - - const queryUrl = 'https://api.openai.com/v1/images/generations'; - final headers = { - 'Content-Type': 'application/json', - 'Authorization': 'Bearer $apiKey', - }; - - final body = jsonEncode({ - 'prompt': description, - 'n': 1, - 'size': '512x512', - }); - if (kDebugMode) { - print('Request URL: $queryUrl'); - } - - final response = - await http.post(Uri.parse(queryUrl), headers: headers, body: body); - if (kDebugMode) { - print('Response Status Code: ${response.statusCode}'); - print('Response Body: ${response.body}'); - } - - if (response.statusCode == 200) { - final jsonResponse = jsonDecode(response.body); - final imageUrl = jsonResponse['data'][0]['url']; - if (kDebugMode) { - print('Generated Image URL: $imageUrl'); - } - - return imageUrl; - } else { - throw Exception('Failed to generate image'); - } - } - - static Future isInputSafe(String input) async { - if (kDebugMode) { - print('isInputSafe called with input: $input'); - } - - // Replace this URL with your AWS Lambda function URL - const lambdaUrl = 'YOUR_LAMBDA_FUNCTION_URL'; - - final headers = { - 'Content-Type': 'application/json', - }; - - final data = { - 'input': input, - }; - - try { - if (kDebugMode) { - print('calling lambda function with input: $input and url: $lambdaUrl'); - } - final response = await http.post( - Uri.parse(lambdaUrl), - headers: headers, - body: jsonEncode(data), - ); - if (kDebugMode) { - print('isInputSafe response status code: ${response.statusCode}'); - print('isInputSafe response headers: ${response.headers}'); - print('isInputSafe response body: ${response.body}'); - } - - if (response.statusCode == 200) { - final responseBody = jsonDecode(response.body); - bool moderationStatus = 
responseBody['isSafe']; - return moderationStatus; - } else { - if (kDebugMode) { - print('isInputSafe error: Status code ${response.statusCode}'); - } - throw Exception('Failed to get response from Lambda function.'); - } - } catch (e) { - if (kDebugMode) { - print('isInputSafe exception: $e'); - } - rethrow; - } - } - - static int getAdjustedMaxTokens(String inputText, - {int defaultMaxTokens = 300}) { - List keywords = [ - 'code', - 'snippet', - 'class', - 'function', - 'method', - 'generate', - 'create', - 'build', - 'implement', - 'algorithm', - 'example', - 'template', - 'sample', - 'skeleton', - 'structure', - ]; - - bool containsKeyword(String text, List keywords) { - return keywords.any((keyword) => text.toLowerCase().contains(keyword)); - } - - // Increase max tokens if the input text contains any of the keywords - if (containsKeyword(inputText, keywords)) { - return defaultMaxTokens * - 3; // Example: increase max tokens by a factor of 3 - } - - return defaultMaxTokens; - } - - static CancelableOperation getResponseFromOpenAI( - String message, { - List> previousMessages = const [], - int maxTries = 1, - String? customSystemPrompt, - }) { - // Create a cancelable completer - final completer = CancelableCompleter(); - - if (OpenAiApi.model == 'pulzeai') { - _getResponseFromPulzeAI( - message, - completer, - previousMessages: previousMessages, - ); - } else { - // Wrap the _getResponseFromOpenAI with the cancelable completer - _getResponseFromOpenAI( - message, - completer, - previousMessages: previousMessages, - maxTries: maxTries, - customSystemPrompt: customSystemPrompt, - ); - } - - return completer.operation; - } - - static String formatPrevMessages( - List> previousMessages) { - return previousMessages.map((message) { - return "${message['role']}: ${message['content']}"; - }).join(', '); - } - - static Future _getResponseFromPulzeAI( - String message, - CancelableCompleter completer, { - List> previousMessages = const [], - }) async { - String? finalResponse = ''; - - if (PulzeAiApi.oat() != '') { - //print(previousMessages); - String formattedPrevMessages = formatPrevMessages(previousMessages); - if (previousMessages.isNotEmpty && PulzeAiApi.sendMessages()) { - finalResponse = await PulzeAiApi.generate( - '$message previousMessages: $formattedPrevMessages'); - } else { - finalResponse = await PulzeAiApi.generate(message); - } - - //print('finalResponse: $finalResponse'); - if (finalResponse != null) { - finalResponse = finalResponse - .replaceAll('assistant: ', '') - .replaceAll('previousMessages: ', '') - .replaceAll('user: ', '') - .replaceAll('[System message]: ', ''); - } - } else { - finalResponse = - 'Please enter your Puzle AI Access Token in the settings.'; - } - - completer.complete(finalResponse); - - // Explicitly return null to avoid - - return; - } - - static Future _getResponseFromOpenAI( - String message, CancelableCompleter completer, - {List> previousMessages = const [], - int maxTries = 1, - String? customSystemPrompt}) async { - String finalResponse = ''; - String inputMessage = message; - int tries = 0; - - // Check if the message is safe - /*bool messageIsSafe = await isInputSafe(inputMessage); - if (!messageIsSafe) { - finalResponse = - 'Sorry, the input provided is not considered safe. 
Please provide a different input.'; - completer.complete(finalResponse); - return; - }*/ - - final apiKey = OpenAiApi.oat(); - - while (tries < maxTries) { - if (kDebugMode) { - print('inputMessage = $inputMessage'); - } - const apiUrl = 'https://api.openai.com/v1/chat/completions'; - - final headers = { - 'Content-Type': 'application/json', - 'Authorization': 'Bearer $apiKey', - }; - - final adjustedMaxTokens = getAdjustedMaxTokens(inputMessage); - - final data = { - 'model': model, - 'messages': [ - {'role': 'system', 'content': customSystemPrompt ?? systemPrompt}, - ...previousMessages, - {'role': 'user', 'content': inputMessage} - ], - 'max_tokens': adjustedMaxTokens, - 'n': 1, - 'stop': null, - 'temperature': 1, - }; - - try { - final response = await http.post( - Uri.parse(apiUrl), - headers: headers, - body: jsonEncode(data), - ); - - if (response.statusCode == 200) { - final responseBody = jsonDecode(utf8.decode(response.bodyBytes)); - String receivedResponse = responseBody['choices'][0]['message'] - ['content'] - .toString() - .trim(); - - // Add the current received response to the final response - finalResponse += receivedResponse; - - // Add the tokens used in this response to the total tokens used - int tokensUsed = responseBody['usage']['total_tokens']; - _totalTokensUsed += tokensUsed; - - // Calculate the cost of the tokens used - double cost = tokensUsed * 0.002 / 1000; - if (kDebugMode) { - // Print the tokens used and the cost to the console - print('Tokens used in this response: $tokensUsed'); - print('Cost of this response: \$${cost.toStringAsFixed(5)}'); - print('Total tokens used so far: $_totalTokensUsed'); - } - - double totalCost = _totalTokensUsed * 0.002 / 1000; - if (kDebugMode) { - print('Total cost so far: \$${totalCost.toStringAsFixed(5)}'); - } - - // Check if the received response was cut-off - if (responseBody['choices'][0]['finish_reason'] == 'length') { - // Use the last part of the received response as input for the next request - inputMessage += receivedResponse; - int maxLength = 1024 * 10; // You can set this to a desired limit - if (inputMessage.length > maxLength) { - inputMessage = - inputMessage.substring(inputMessage.length - maxLength); - } - tries++; - } else { - break; - } - } else { - throw Exception('Failed to get response from OpenAI API.'); - } - } catch (e) { - if (tries + 1 < maxTries) { - tries++; - // You can add a delay before retrying the request. - await Future.delayed(const Duration(seconds: 2)); - } else { - finalResponse = - 'Sorry, there was an error processing your request. Please try again later.'; - if (kDebugMode) { - print('Error: $e'); - } - break; - } - } - } - - completer.complete(finalResponse); - - // Explicitly return null to avoid - - return; - } - - // Draw to Code Functionality - - Future getHtmlFromOpenAI( - String imageBase64, String userPrompt) async { - if (_apiKey == '') { - return 'Enter API key in settings'; - } - - const systemPrompt = """ -You are a skilled web developer with expertise in Tailwind CSS. A user will provide a low-fidelity wireframe along with descriptive notes. Your task is to create a high-fidelity, responsive HTML webpage using Tailwind CSS and JavaScript, embedded within a single HTML file. - -- Embed additional CSS and JavaScript directly in the HTML file. -- For images, use placeholders from Unsplash or solid color rectangles. -- Draw inspiration for fonts, colors, and layouts from user-provided style references or wireframes. 
-- For any previous design iterations, use the provided HTML to refine the design further. -- Apply creative improvements to enhance the design. -- Load JavaScript dependencies through JavaScript modules and unpkg.com. - -The final output should be a single HTML file, starting with "". Avoid markdown, excessive newlines, and the character sequence "```". -"""; // The system prompt - - final openAIKey = _apiKey; // Replace with your actual API key - if (openAIKey.isEmpty) { - return ''; - } - - final url = Uri.parse("https://api.openai.com/v1/chat/completions"); - var request = http.Request("POST", url) - ..headers.addAll({ - 'Content-Type': 'application/json', - 'Authorization': 'Bearer $openAIKey', - }) - ..body = jsonEncode({ - "model": "gpt-4-vision-preview", - "temperature": 0, - "max_tokens": 4096, - "messages": [ - {"role": "system", "content": systemPrompt}, - { - "role": "user", - "content": [ - {"type": "text", "text": userPrompt}, - { - "type": "image_url", - "image_url": {"url": "data:image/jpeg;base64,$imageBase64"} - } - ] - } - ], - }); - - try { - final response = await http.Response.fromStream(await request.send()); - - if (response.statusCode == 200) { - final decodedResponse = jsonDecode(response.body); - // Assuming 'html' is part of the response JSON structure - String html = - decodedResponse['choices']?.first['message']['content'] ?? ''; - // Additional logic to handle the HTML content goes here - return html; - } else { - // Handle the error, maybe throw an exception - if (kDebugMode) { - print('Failed to get HTML from OpenAI: ${response.body}'); - } - return ''; - } - } catch (e) { - if (kDebugMode) { - print('Caught error: $e'); - } - return ''; - } - } -} diff --git a/app/lib/views/dialogs/ai_settings_dialog.dart b/app/lib/views/dialogs/ai_settings_dialog.dart index 4b0c76c..df92317 100644 --- a/app/lib/views/dialogs/ai_settings_dialog.dart +++ b/app/lib/views/dialogs/ai_settings_dialog.dart @@ -1,7 +1,7 @@ import 'package:flutter/material.dart'; +import 'package:glowby/services/network.dart'; import 'package:glowby/views/screens/global_settings.dart'; import 'package:glowby/services/pulze_ai_api.dart'; -import 'package:glowby/services/openai_api.dart'; import 'package:glowby/utils/text_to_speech.dart'; import 'package:glowby/utils/utils.dart'; @@ -15,13 +15,10 @@ class AiSettingsDialog extends StatefulWidget { } class AiSettingsDialogState extends State { - bool _isHuggingFaceSelected = false; bool _isPulzeSelected = false; final TextEditingController _systemPromptController = TextEditingController(); - final TextEditingController _modelIdController = TextEditingController(); final TextEditingController _pulzeModelIdController = TextEditingController(); - final TextEditingController _templateController = TextEditingController(); Widget _buildAutonomousModeCheckbox() { if (GlobalSettings().selectedPrompt == 'Complex Task Prompt') { @@ -80,19 +77,18 @@ class AiSettingsDialogState extends State { @override void initState() { super.initState(); - GlobalSettings().selectedModel = OpenAiApi.model; + GlobalSettings().selectedModel = 'pulzeai'; _systemPromptController.text = GlobalSettings().systemPrompt; //_isGPT4Selected = // _selectedModel == 'gpt-4' || _selectedModel == 'gpt-4-1106-preview'; - _isHuggingFaceSelected = GlobalSettings().selectedModel == 'huggingface'; _isPulzeSelected = GlobalSettings().selectedModel == 'pulzeai'; _pulzeModelIdController.text = PulzeAiApi.model(); } void _saveOpenAISettings() { - OpenAiApi.setModel(GlobalSettings().selectedModel); - 
OpenAiApi.setSystemPrompt(GlobalSettings().systemPrompt); - OpenAiApi.setSelectedLanguage(GlobalSettings().selectedLanguage); + Network.setModel(GlobalSettings().selectedModel); + Network.setSystemPrompt(GlobalSettings().systemPrompt); + Network.setSelectedLanguage(GlobalSettings().selectedLanguage); } void _saveHuggingFaceSettings() { @@ -152,7 +148,6 @@ class AiSettingsDialogState extends State { GlobalSettings().selectedModel = value!; //_isGPT4Selected = // value == 'gpt-4' || value == 'gpt-4-1106-preview'; - _isHuggingFaceSelected = value == 'huggingface'; _isPulzeSelected = value == 'pulzeai'; }); }, @@ -185,71 +180,28 @@ class AiSettingsDialogState extends State { ), ], ), - if (_isHuggingFaceSelected) - Column( - crossAxisAlignment: CrossAxisAlignment.start, - children: [ - const SizedBox(height: 10), - const Text('Hugging Face Model ID:'), - const SizedBox(height: 6), - InkWell( - child: const Text( - '→ Browse available models', - style: TextStyle(color: Colors.blue), - ), - onTap: () => Utils.launchURL( - 'https://huggingface.co/models?pipeline_tag=text2text-generation&sort=downloads'), - ), - TextField( - controller: - _modelIdController, // Use TextEditingController to retrieve user input - decoration: const InputDecoration( - labelText: 'Model ID', - ), - onChanged: (value) { - // Update your modelId variable here - }, - ), - const SizedBox(height: 10), - const Text('Response Format'), - TextField( - controller: - _templateController, // Use TextEditingController to retrieve user input - maxLines: 5, - decoration: const InputDecoration( - labelText: 'Template (*** is the response)', - ), - onChanged: (value) { - // Update your template variable here - }, - ), - ], - ), const SizedBox(height: 10), - if (!_isHuggingFaceSelected) const Text('System Prompt:'), - if (!_isHuggingFaceSelected) - if (!_isHuggingFaceSelected) - DropdownButton( - value: GlobalSettings().selectedPrompt, - items: buildPromptDropdownItems(), - onChanged: (value) { - setState(() { - _promptChanged(value); - }); - }, - ), - if (!_isHuggingFaceSelected) _buildAutonomousModeCheckbox(), - if (!_isHuggingFaceSelected) - TextField( - controller: _systemPromptController, - maxLines: 3, - decoration: const InputDecoration( - labelText: 'Enter system prompt', - ), - onChanged: (value) { - GlobalSettings().systemPrompt = value; - }, + const Text('System Prompt:'), + DropdownButton( + value: GlobalSettings().selectedPrompt, + items: buildPromptDropdownItems(), + onChanged: (value) { + setState(() { + _promptChanged(value); + }); + }, + ), + _buildAutonomousModeCheckbox(), + TextField( + controller: _systemPromptController, + maxLines: 3, + decoration: const InputDecoration( + labelText: 'Enter system prompt', ), + onChanged: (value) { + GlobalSettings().systemPrompt = value; + }, + ), /*if (_isHuggingFaceSelected) Text('System Message:'), if (_isHuggingFaceSelected) TextField( diff --git a/app/lib/views/dialogs/api_key_dialog.dart b/app/lib/views/dialogs/api_key_dialog.dart index f886ca4..947cfc5 100644 --- a/app/lib/views/dialogs/api_key_dialog.dart +++ b/app/lib/views/dialogs/api_key_dialog.dart @@ -1,5 +1,4 @@ import 'package:flutter/material.dart'; -import 'package:glowby/services/openai_api.dart'; import 'package:glowby/utils/utils.dart'; import '../../services/pulze_ai_api.dart'; @@ -25,11 +24,11 @@ class ApiKeyDialogState extends State { void initState() { super.initState(); - OpenAiApi.loadOat().then((_) { + PulzeAiApi.loadOat().then((_) { setState(() { - _apiKey = OpenAiApi.oat(); _apiKeyController.text = 
_apiKey; _pulzeAiToken = PulzeAiApi.oat(); + _apiKey = _pulzeAiToken; _pulzeAiController.text = _pulzeAiToken; }); }); @@ -60,7 +59,6 @@ class ApiKeyDialogState extends State { ); } else { // If all keys are valid, set them and show a success message. - OpenAiApi.setOat(_apiKey); PulzeAiApi.setOat(_pulzeAiToken); Navigator.pop(context); // Hide the dialog ScaffoldMessenger.of(context).showSnackBar( diff --git a/app/lib/views/screens/chat_screen.dart b/app/lib/views/screens/chat_screen.dart index dcc9237..351cc04 100644 --- a/app/lib/views/screens/chat_screen.dart +++ b/app/lib/views/screens/chat_screen.dart @@ -1,5 +1,7 @@ import 'package:flutter/material.dart'; import 'package:flutter/services.dart'; +import 'package:glowby/services/network.dart'; +import 'package:glowby/services/pulze_ai_api.dart'; import 'package:glowby/views/screens/global_settings.dart'; import 'package:url_launcher/url_launcher_string.dart'; import 'package:glowby/views/widgets/tasks_view.dart'; @@ -10,7 +12,6 @@ import 'magical_loading_view.dart'; import '../widgets/message.dart'; import '../widgets/new_message.dart'; import '../widgets/messages.dart'; -import '../../services/openai_api.dart'; import '../../utils/text_to_speech.dart'; // Import the new TextToSpeech class import '../dialogs/api_key_dialog.dart'; import '../../utils/timestamp.dart'; // Import the ApiKeyDialog widget @@ -84,7 +85,7 @@ class ChatScreenState extends State { } void loadAPIKey() { - OpenAiApi.loadOat().then((_) => setState(() {})); + PulzeAiApi.loadOat().then((_) => setState(() {})); } // Refresh the UI state of the chat screen @@ -176,7 +177,7 @@ class ChatScreenState extends State { List tasks = []; try { - _currentOperation = OpenAiApi.getResponseFromOpenAI(_lastInputMessage, + _currentOperation = Network.getResponseFromPulze(_lastInputMessage, customSystemPrompt: 'You are Glowby, an AI assistant designed to break down complex tasks into a manageable 5-step plan. The steps should be concise.'); @@ -312,7 +313,7 @@ class ChatScreenState extends State { .reversed .toList(); - _currentOperation = OpenAiApi.getResponseFromOpenAI( + _currentOperation = Network.getResponseFromPulze( message, previousMessages: formattedPreviousMessages, customSystemPrompt: customSystemPrompt, @@ -557,7 +558,7 @@ class ChatScreenState extends State { ), ), // Add the AI Settings button conditionally - if (OpenAiApi.oat().isNotEmpty) + if (PulzeAiApi.oat().isNotEmpty) if (widget._showAiSettings != null && widget._showAiSettings!) Padding( diff --git a/app/lib/views/screens/global_settings.dart b/app/lib/views/screens/global_settings.dart index cfc026a..d1cf0f6 100644 --- a/app/lib/views/screens/global_settings.dart +++ b/app/lib/views/screens/global_settings.dart @@ -1,4 +1,4 @@ -import 'package:glowby/services/openai_api.dart'; +import 'package:glowby/services/network.dart'; class GlobalSettings { static final GlobalSettings _instance = GlobalSettings._internal(); @@ -7,10 +7,10 @@ class GlobalSettings { String userName = 'Me'; bool voiceEnabled = true; - String selectedLanguage = OpenAiApi.selectedLanguage; + String selectedLanguage = Network.selectedLanguage; bool autonomousMode = false; - String selectedModel = OpenAiApi.model; - String systemPrompt = OpenAiApi.systemPrompt; + String selectedModel = Network.model; + String systemPrompt = Network.systemPrompt; String selectedPrompt = 'Simple Assistant Prompt'; factory GlobalSettings() { @@ -148,9 +148,9 @@ Human: You choose anything you like. Direction comes from the next message. 
One void loadDialogValues(selectedModelInput, selectedLanguageInput, systemPromptInput, autonomousModeInput) { selectedPrompt = 'Simple Assistant Prompt'; - selectedModel = OpenAiApi.model; - systemPrompt = OpenAiApi.systemPrompt; - selectedLanguage = OpenAiApi.selectedLanguage; + selectedModel = Network.model; + systemPrompt = Network.systemPrompt; + selectedLanguage = Network.selectedLanguage; autonomousMode = false; if (selectedModelInput != null && selectedModelInput != '') { diff --git a/app/lib/views/screens/talk_screen.dart b/app/lib/views/screens/talk_screen.dart index b894e1b..1fc2dfb 100644 --- a/app/lib/views/screens/talk_screen.dart +++ b/app/lib/views/screens/talk_screen.dart @@ -1,6 +1,5 @@ import 'dart:convert'; import 'package:flutter/material.dart'; -import 'package:glowby/services/openai_api.dart'; import 'package:glowby/services/pulze_ai_api.dart'; import 'package:glowby/utils/utils.dart'; import '../../utils/color_utils.dart'; @@ -34,7 +33,6 @@ class TalkState extends State { } Future loadAPIKeys() async { - await OpenAiApi.loadOat(); await PulzeAiApi.loadOat(); } @@ -46,7 +44,6 @@ class TalkState extends State { } void resetApiKeys() { - OpenAiApi.resetOat(); PulzeAiApi.resetOat(); } diff --git a/app/lib/views/widgets/new_message.dart b/app/lib/views/widgets/new_message.dart index 3e40fca..284e765 100644 --- a/app/lib/views/widgets/new_message.dart +++ b/app/lib/views/widgets/new_message.dart @@ -7,7 +7,6 @@ import 'package:glowby/views/widgets/paint_window.dart'; import 'package:glowby/utils/utils.dart'; import 'message.dart'; -import '../../services/openai_api.dart'; import '../../utils/timestamp.dart'; import 'package:flutter/material.dart'; @@ -211,63 +210,7 @@ class NewMessageState extends State { } Future handleImageGenerationCommand(String message) async { - final pattern = Utils.getMatchingPattern(message); - final description = pattern != null - ? message.replaceAll(RegExp(pattern, caseSensitive: false), '').trim() - : ''; - //print('description: $description'); - //print('enableAi: ${widget._enableAi}'); - if (description.isNotEmpty && - (widget._enableAi == null || widget._enableAi!)) { - Message drawingMessage = Message( - text: Utils.getRandomImageGenerationFunnyMessage(), - createdAt: Timestamp.now(), - userId: Ai.defaultUserId, - username: widget._name == '' ? 'AI' : widget._name, - ); - widget._messages.insert(0, drawingMessage); - widget._refresh(); - - // Generate the image - try { - final imageUrl = (await OpenAiApi.generateImageUrl(description))!; - Message message = Message( - text: 'Here is your image!', - createdAt: Timestamp.now(), - userId: Ai.defaultUserId, - username: widget._name == '' ? 'AI' : widget._name, - link: imageUrl, - ); - - widget._messages.remove(drawingMessage); - widget._messages.insert(0, message); - widget._messages.insert( - 0, - Message( - text: Utils.getRandomImageReadyMessage(), - createdAt: Timestamp.now(), - userId: Ai.defaultUserId, - username: widget._name == '' ? 'AI' : widget._name, - )); - - widget._refresh(); - - Utils.downloadImage(imageUrl, description); - } catch (e) { - // Handle the exception and emit an error state - widget._messages.remove(drawingMessage); - Message message = Message( - text: 'Something went wrong. Please try again later.', - createdAt: Timestamp.now(), - userId: Ai.defaultUserId, - username: widget._name == '' ? 
'AI' : widget._name, - ); - - widget._messages.remove(drawingMessage); - widget._messages.insert(0, message); - widget._refresh(); - } - } + // not supported yet } void _stopProcessing() { @@ -352,7 +295,6 @@ class NewMessageState extends State { ), onPressed: _voiceMessage, ), - if (_isProcessing) IconButton( color: Theme.of(context).primaryColor, diff --git a/app/lib/views/widgets/paint_window.dart b/app/lib/views/widgets/paint_window.dart deleted file mode 100644 index f1f3b2c..0000000 --- a/app/lib/views/widgets/paint_window.dart +++ /dev/null @@ -1,392 +0,0 @@ -import 'dart:async'; -import 'dart:convert'; -import 'dart:math'; - -import 'package:flutter/material.dart'; -import 'package:flutter_image_compress/flutter_image_compress.dart'; -import 'package:glowby/views/dialogs/ai_error_dialog.dart'; -import 'package:glowby/views/html/html_view_screen.dart'; -import 'dart:ui' as ui; - -import 'package:glowby/services/openai_api.dart'; -import 'package:glowby/utils/utils.dart'; -import 'package:flutter/foundation.dart'; - -class PaintWindow extends StatefulWidget { - const PaintWindow({super.key}); - - @override - PaintWindowState createState() => PaintWindowState(); -} - -class PaintWindowState extends State { - final int width = 600; - final int height = 450; - - List points = []; - final TextEditingController nameController = TextEditingController(); - String creationName = ''; - bool isLoading = false; - Uint8List? imgBytes; - ui.Image? drawingImage; - - @override - void dispose() { - nameController.dispose(); - super.dispose(); - } - - // This function converts the drawing (list of points) to a base64 string - Future convertToBase64JpegMobile(List points) async { - // Create a picture recorder to record the canvas operations - final ui.PictureRecorder recorder = ui.PictureRecorder(); - final Canvas canvas = Canvas(recorder); - - // Draw your points here onto the canvas - final paint = Paint() - ..color = Colors.black - ..strokeCap = StrokeCap.round - ..strokeWidth = 2.0; - for (int i = 0; i < points.length - 1; i++) { - if (points[i] != null && points[i + 1] != null) { - canvas.drawLine(points[i]!, points[i + 1]!, paint); - } - } - - // End recording the canvas operations - final ui.Picture picture = recorder.endRecording(); - - // Convert the picture to an image - final ui.Image image = await picture.toImage( - width, height); // Set the width and height as needed - final ByteData? 
byteData = - await image.toByteData(format: ui.ImageByteFormat.rawRgba); - - if (byteData == null) { - if (kDebugMode) { - print("Failed to obtain byte data from image"); - } - return ''; - } - - // Compress the image and get JPEG format Uint8List - final Uint8List imgBytes = await FlutterImageCompress.compressWithList( - byteData.buffer.asUint8List(), - minWidth: width, - minHeight: height, - quality: 100, // Adjust the quality as needed - format: CompressFormat.jpeg, - ); - - // Base64 encode the JPEG bytes - final String base64String = base64Encode(imgBytes); - - return base64String; - } - - // This function converts the drawing (list of points) to a base64 string - Future convertToBase64Png(List points) async { - // Create a picture recorder to record the canvas operations - final ui.PictureRecorder recorder = ui.PictureRecorder(); - final Canvas canvas = Canvas(recorder); - - // Draw your points here onto the canvas - final paint = Paint() - ..color = Colors.black - ..strokeCap = StrokeCap.round - ..strokeWidth = 2.0; - for (int i = 0; i < points.length - 1; i++) { - if (points[i] != null && points[i + 1] != null) { - canvas.drawLine(points[i]!, points[i + 1]!, paint); - } - } - - // End recording the canvas operations - final ui.Picture picture = recorder.endRecording(); - - // Convert the picture to an image - final ui.Image image = await picture.toImage( - width, height); // Set the width and height as needed - final ByteData? byteData = - await image.toByteData(format: ui.ImageByteFormat.png); - - // Convert the byte data to a Uint8List - final Uint8List imgBytes = byteData!.buffer.asUint8List(); - - // Base64 encode the image bytes - final String base64String = base64Encode(imgBytes); - - return base64String; - } - - Future callOpenAI() async { - if (isLoading) { - return; - } - - setState(() { - isLoading = true; - }); - - // Convert points to a suitable format and call OpenAI method - // For example, you might convert points to an image and then to base64 - //String imageBase64 = await convertToBase64Jpeg(points); - - if (drawingImage != null) { - // Introduce a delay before executing the code - await Future.delayed(const Duration(milliseconds: 800)); - } - String imageBase64 = - await Utils.convertToBase64JpegWeb(points, drawingImage, width, height); - - // this is for testing - // imgBytes = base64Decode(imageBase64); // Implement this function - - String htmlResponse = - await OpenAiApi().getHtmlFromOpenAI(imageBase64, creationName); - - String htmlContent = creationName; - - if (htmlResponse == '') { - if (mounted) { - Navigator.of(context).pop(); - } - - _showAiErrorDialog(); - return; - } - - try { - htmlContent = htmlResponse.split("```html")[1].split('```')[0]; - } catch (e) { - htmlContent = htmlResponse; - } - - // Use the captured context after the async gap - if (mounted) { - Navigator.push( - context, // use the safeContext that was captured before the async gap - MaterialPageRoute( - builder: (context) => HtmlViewScreen( - htmlContent: htmlContent, - appName: creationName, - ), - ), - ); - } - - setState(() { - isLoading = false; - }); - - clear(); - } - - void _showAiErrorDialog() { - showDialog( - context: context, - builder: (BuildContext context) { - return const AiErrorDialog(); - }, - ).then( - (value) => setState(() {}), - ); - } - - void clear() { - drawingImage = null; - nameController.clear(); - setState(() { - points.clear(); - }); - } - - Future loadImage(Uint8List imageBytes) async { - final Completer completer = Completer(); - 
ui.decodeImageFromList(imageBytes, (ui.Image img) { - if (!completer.isCompleted) { - completer.complete(img); - } - }); - drawingImage = await completer.future; - setState(() {}); // Trigger a repaint - } - - Future uploadImage() async { - try { - final Uint8List? value = await Utils.pickImage(); - if (value != null) { - await loadImage(value); // Load the image and update the state - } - } catch (e) { - // Handle the error or display an error message - if (kDebugMode) { - print('Error picking image: $e'); - } - } - - /*final picker = ImagePicker(); - final pickedFile = await picker.getImage(source: ImageSource.gallery); - - if (pickedFile != null) { - // If the pickedFile is not null, then we have a path to the image file. - // You can now upload this image to a server or use it in your application. - final File imageFile = File(pickedFile.path); - // Implement your image upload functionality here - } else { - print('No image selected.'); - }*/ - } - - @override - Widget build(BuildContext context) { - if (imgBytes != null) { - return Image.memory(imgBytes!); - } - - return AlertDialog( - title: const Text('Magic Window (Powered by GPT-4 with Vision)'), - content: SingleChildScrollView( - child: Column( - mainAxisSize: MainAxisSize.min, - children: [ - Container( - width: width.toDouble(), - height: height.toDouble(), - decoration: BoxDecoration( - border: Border.all(color: Colors.black), - color: Colors.white, - ), - child: GestureDetector( - onPanUpdate: (DragUpdateDetails details) { - setState(() { - RenderBox renderBox = - context.findRenderObject() as RenderBox; - points.add(renderBox.globalToLocal(details.localPosition)); - }); - }, - onPanEnd: (DragEndDetails details) { - setState(() { - points.add( - null); // Add a null to the list to separate the lines - }); - }, - child: CustomPaint( - painter: DrawingPainter(points: points, image: drawingImage), - size: Size.infinite, - ), - ), - ), - Padding( - padding: const EdgeInsets.all(8.0), - child: SizedBox( - width: width.toDouble(), // Set your desired maximum width here - child: TextField( - controller: nameController, - decoration: const InputDecoration( - labelText: 'Name your creation', - ), - onChanged: (value) { - creationName = value; - }, - ), - ), - ), - Row( - mainAxisAlignment: MainAxisAlignment.start, - children: [ - IconButton( - icon: const Icon(Icons.image), - onPressed: uploadImage, - ), - ], - ), - ], - ), - ), - actions: [ - if (isLoading && drawingImage != null) - const Text('Loading...') - else if (isLoading) - const CircularProgressIndicator() - else - TextButton( - onPressed: clear, - child: const Text('Clear'), - ), - TextButton( - onPressed: callOpenAI, - child: const Text( - 'Build'), // Here we call the method to process the drawing - ), - TextButton( - child: const Text('Close'), - onPressed: () { - Navigator.of(context).pop(); - }, - ), - ], - ); - } -} - -class DrawingPainter extends CustomPainter { - final List points; - final ui.Image? 
image; - - DrawingPainter({required this.points, this.image}); - - void paintImage( - {required Canvas canvas, required ui.Image image, required Size size}) { - // Calculate the scale factor to fit the image within the canvas if needed - final double scaleFactor = - min(size.width / image.width, size.height / image.height); - - // Calculate the destination rectangle for the scaled image - final Rect destRect = Rect.fromLTWH( - (size.width - image.width * scaleFactor) / 2, - (size.height - image.height * scaleFactor) / 2, - image.width * scaleFactor, - image.height * scaleFactor, - ); - - // Draw the scaled image at the center position - canvas.drawImageRect( - image, - Rect.fromLTWH(0, 0, image.width.toDouble(), image.height.toDouble()), - destRect, - Paint()); - } - - @override - void paint(Canvas canvas, Size size) { - // If there's an image, draw it - if (image != null) { - paintImage(canvas: canvas, image: image!, size: size); - } - - var paint = Paint() - ..color = Colors.black - ..strokeCap = StrokeCap.round - ..strokeWidth = 2.0; - - for (int i = 0; i < points.length - 1; i++) { - if (points[i] != null && points[i + 1] != null) { - // Check if both points are within the bounds of the CustomPaint widget - if (points[i]!.dx >= 0 && - points[i]!.dx <= size.width && - points[i]!.dy >= 0 && - points[i]!.dy <= size.height && - points[i + 1]!.dx >= 0 && - points[i + 1]!.dx <= size.width && - points[i + 1]!.dy >= 0 && - points[i + 1]!.dy <= size.height) { - canvas.drawLine(points[i]!, points[i + 1]!, paint); - } - } - } - } - - @override - bool shouldRepaint(covariant CustomPainter oldDelegate) => true; -}
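
For reference, a minimal usage sketch of the new Network service introduced above, mirroring the call sites in ai.dart and chat_screen.dart. The askGlowby helper, the literal message, and the sample history are illustrative placeholders, not code from this diff:

import 'package:async/async.dart';
import 'package:glowby/services/network.dart';

// Hypothetical helper showing the intended call pattern.
Future<void> askGlowby() async {
  // Start a cancelable Pulze AI request, passing prior turns as role/content maps.
  final operation = Network.getResponseFromPulze(
    'What can you help me with?',
    previousMessages: const [
      {'role': 'user', 'content': 'Hi'},
      {'role': 'assistant', 'content': 'Hello! How can I help?'},
    ],
  );

  // Await the reply; a caller may instead invoke operation.cancel() to abort.
  final response = await operation.value;
  print(response);
}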