Skip to content

Commit

Permalink
Version 5.5
Browse files Browse the repository at this point in the history
  • Loading branch information
Lin-Rexter committed Oct 7, 2024
1 parent ca20817 commit a048cb9
Show file tree
Hide file tree
Showing 19 changed files with 495 additions and 227 deletions.
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
"kepler.gl": "^3.0.0",
"mapbox-gl": "^3.6.0",
"maplibre-gl": "^4.7.1",
"marked": "^14.1.2",
"next": "14.2.5",
"next-themes": "^0.3.0",
"nextjs-toploader": "^1.6.12",
Expand Down
16 changes: 13 additions & 3 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

63 changes: 51 additions & 12 deletions src/app/api/ai/gemini.js
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";

// NOTE(review): NEXT_PUBLIC_* env vars are inlined into the browser bundle,
// so this Gemini API key is exposed to every client — consider moving the
// Gemini call behind a server-only route handler instead.
const apiKey = process.env.NEXT_PUBLIC_GEMINI_API_KEY;
const genAI = new GoogleGenerativeAI(apiKey);

// Single shared model handle for this module; the commit upgrades the
// model from gemini-1.5-flash-002 to gemini-1.5-pro-002.
const model = genAI.getGenerativeModel({
  model: "gemini-1.5-pro-002",
});

const generationConfig = {
Expand All @@ -15,16 +16,54 @@ const generationConfig = {
responseMimeType: "text/plain",
};

// NOTE(review): this module-level chatSession is the PRE-change version shown
// in the diff — the commit removes it and creates the session inside
// gemini_ask() instead, so each question starts with a fresh, empty history.
// A shared module-level session would accumulate history across all callers.
const chatSession = model.startChat({
generationConfig,
// safetySettings: Adjust safety settings
// See https://ai.google.dev/gemini-api/docs/safety-settings
history: [
],
});


/**
 * Send a single prompt to the Gemini model and return the outcome as a
 * `{ data, error }` result object (never throws to the caller).
 *
 * @param {string} ask - The user prompt to send to the model.
 * @returns {Promise<{data: string|null, error: string|null}>} `data` holds the
 *   model's text response on success; `error` holds the error message on
 *   failure. Exactly one of the two is non-null.
 */
export async function gemini_ask(ask) {
  // A fresh session per call: no chat history is carried between questions.
  const chatSession = model.startChat({
    generationConfig,
    // safetySettings: Adjust safety settings
    // See https://ai.google.dev/gemini-api/docs/safety-settings
    history: [],
  });

  const response_dict = {
    data: null,
    error: null,
  };

  try {
    const result = await chatSession.sendMessage(ask);
    response_dict.data = result.response.text();
  } catch (e) {
    // Swallow the error deliberately so callers get a uniform result object;
    // the message is surfaced via `error` for the UI to display.
    console.log(e.message);
    response_dict.error = e.message;
  }

  // SECURITY: removed the commented-out Hugging Face Llama fallback that was
  // here — it embedded a live `hf_...` API token in source. That token is
  // compromised (it was committed publicly) and must be revoked; any future
  // fallback should read its token from an environment variable.

  return response_dict;
}
1 change: 0 additions & 1 deletion src/app/layout.jsx
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ import NextTopLoader from 'nextjs-toploader';
import { Providers } from './providers'
import Loading from "./loading";


const DynamicHeader = dynamic(
() => import('@/components/Header'),
{
Expand Down
Loading

0 comments on commit a048cb9

Please sign in to comment.