From a9110296729a22ba7823dac8660e78cc3bcd24a1 Mon Sep 17 00:00:00 2001 From: Mark McDonald Date: Fri, 10 May 2024 17:07:01 +0800 Subject: [PATCH] Add workshop files for Workspace / Apps-script lab These reflect the finished codelab that we're presenting at I/O 2024. --- .../CollegeExpenses.xlsx | Bin 0 -> 6611 bytes .../Gemini-blog.txt | 108 ++++++ .../README.md | 10 + .../Apps_script_and_Workspace_codelab/main.gs | 40 +++ .../utils.gs | 323 ++++++++++++++++++ 5 files changed, 481 insertions(+) create mode 100644 examples/Apps_script_and_Workspace_codelab/CollegeExpenses.xlsx create mode 100644 examples/Apps_script_and_Workspace_codelab/Gemini-blog.txt create mode 100644 examples/Apps_script_and_Workspace_codelab/README.md create mode 100644 examples/Apps_script_and_Workspace_codelab/main.gs create mode 100644 examples/Apps_script_and_Workspace_codelab/utils.gs diff --git a/examples/Apps_script_and_Workspace_codelab/CollegeExpenses.xlsx b/examples/Apps_script_and_Workspace_codelab/CollegeExpenses.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..53ecf5fb4c0ef21d26f4c2613d99d29d6daf4704 GIT binary patch literal 6611 zcma)A1z40@w}zp+Q%dRX5|9*8q(M5RYbasp?l>?sl0&C7g4ECrB7$^x2?#3XKj^u? z9PT;i-Zk^g_su-B*YoYQ-*>HdEoFHGL_#=JR8+WRtP(A_TSf@G2ibDIv@~{dasGAT z<^b8*?rUz_&kEuC1bCyA$ND~&njSX0@4Ead4I&YVtT6L~kH5Nc`F?CIYnyjyUS41L z2Smx5S&m6S(S2c(Hesf$mg0Q5Nh7u?>vU}FYCiaG7wAWmbf3o7PvQy$yG1NTW9U8G z$hPO^$Gv2O(_nHp4Haax9c6*SRhMnOL;{m(e7ZW6CP1)fz5w`PdKwZl=2b{jK20Z^ z2m{zDQQ&xw>=+vU!rwgtpr4cuZ8T>^8+go)jQ9%GPycwJer7HHGHzn7ti-#(ESP0O z&!GYBnF#-`V-pVx&}C14B&xp2#eg9>VcmxLAu4j{*Cfaq^L*c;2+pS5=LCUGW`FtPOa$3>u^y3J)et(#?yrh?RYM65T}_bUv>H zVBj&Bb7!H&moXn_QG927tgwdtI`jp-y_#frBafot(>hMQPBJXS>SJJ9@ABj5G|PU4 zf|PGA@K5W~Fh;?Xc4F2TOc2I^+ThNx7UP4sn%BPj5Iw1ZC^rr9_GpH&jV&a&+ zwVwxhJbU~DtI(h!kfFn+W39)X=Awr}27Z8HU)KO)Rl5{!XjSnnN46kan-d9z9ai$pGMqSc9#Zd&L-$6+h}N}bGR0A~tx zLd+%Qq~!xeDAZvyj$D#&)6kmU&CmZB2wX>)idu1h*{(x2Zh2oH)$*z}3`Lltg(^Cx z`n|Etwd<;or?;0EpLO6O4u^+8xo=Sq4@)KFrHk+iN110hbh?G9 zl}{+J*w;ruD9=|Nr(M%tocTS4?U=yIH+SiCUu4Ebcx8DM)UX21YaIkQI5liIIOTuj zCh0G3nmQS~S=n1SbN+erlb;z0v-Y!Gm?2q4hcsKQaX6=7vZ}}&$x!&t7e=hE<^4`< zGAPoj_?!Drs1tJ#%rP< zi4tW`09kK4an?g$X-;2m2@pfw_njOt=0J-ZK|+%bkLNpT^`vVBUZ~(y%!3EUX=4-V zo)UV~Rn(=ws92r@_Mm2hz96STq(V;)2YM#zjJ#fcGH~2~61EQZ*b26h*#$(;$70zO zomxnI4ar8V=}Dd8Oi@u|@rMuk=0o@SfSR_I92}=)dP<(Rpb-OcZ2%tn%}PmMqcUGw z6|otWlsDzDCuZx)YxucrM=t2=zmQ2s>_?7>7agf6pz~M7{(!l9)t7%+ru}k{>EmS4 zdlT8D{xn2hOYxnisCbt|(@iZ-EMuD(GcWO-7RoLk>FR-`X<$+>#oMg|uuF_vricnOX$D01K8l+4ck|2qRK#!K*CYY7IMuw=OX>zJ}1-PM&32%jA zHEe@)urU0ev~dGE**II8nZe51&&!{*S&Yf0A?(RAT7-fO@n#iua}}#Ur0&frFkEb= zNtWf&AXm;b7@P@obSX-0^Ph+sK2#yzJhuI#g_lfN<)H#E0d zR3q0AHi0}S_N5!}9cra7kM-2Rhln8*sqZA~OmV}ZCkN3Ig{7&R7vh3x?UI~O)Y;Jn z%ENeZgO?GO!d_fUgsqEVst^*!m$zdvCQ{LWx$>gDHO6%^teRgh33aP7scgYv_&gv( zA&tc*u(9+u3tRQ1aHRXHAjaD_^Y&YI)K2c{I#=if(3B{Pl-DA6s%zu&Tl=(dV_@Wk zmr_I{{A<>)lzP|HXd!SN_a*=eHjZuVL6qT(p^k!1)^TiAeS)dLz zI5$v4pWwQp`$V0#JKhCZGCo*2guLn>G^*P>(&92(WSr67@=Q+yIvC4NOjl95Zk)8E zL$hmR>g96T;vlC(ZZ$ke9apCBcr+@RoU?h}OA|9JjmZzctj;jqr?X)Vm&MK_)N9#mLk?tRM4- z3Ups}*R64o zALWo~-7;+zJxDMML_;J&>Le6$wh<<*9fk0x$#*%2t*C#)x!^JQ!$vIzZp;KahwGV^ zPOt^BjDGl=h*N@_7?c<7D%8M8$MUbFBlwkfTrAD(%s793{^Z-R=B)i34enLIHNr{@ zbgG5yz^~pB=8&fGBK!OMXYja3=LO1$- zJSJXZ5_idzbK-T;PE`f>D5)4~B0Jf*m)g7u%L&k#thOHUKOlTG??i>ngZd+a2qN;q z9N=19T&{HH=+Gcs=A{j~<0SC7bV7*Vv2=DKPUUio{xzOx>#Qm^6_>uW{12KLJqWVI zI&Q;M8@VPzhvz2!qr#PYD;{3tet{B?x%Fn9iLVz*tB{Mxu^WMJ#HLO~K!J;t){TwR zI{H2h-JS@|yZW474S`>En#J*VZ7rrLn-^CsIzbkK@Ak_Ar?P_z_I&*6?G_j9ZCnt- zozBu#x>6`l!)g** 
zKCrNJ^B{aQwwNz!+odhejZuAua~uwSBPEY7tkBuQvlH%W4c4Qx*5aIkc2n$fFEw6f z46Ma2NwL4&B{ES_>je-tDu<#eI_>6B)BxQQ^QYsRdT8H zQOq;H5Il`FONIvA&L|(J#Orb0jo_*`4v-!f%xat*Tn80rsF0m>drpvJ9U>1j@qR8E zN#A8ngCnsaE#7^ORU>tTMl9`TRYt|=Uq)~xE+`KTo->Y9=ejD65SOQ4_A5x8*)=C) zvZ~tk^&AHQ zX@Qi$G^U!p?SVjK%Wlb|2wG0jE5Dq_0lP6K1{*|rf7}z6rO>ewXW>igcn9*0sko?C zI~-59q&ex=-MdO_&KaQ8T>oR?)nM?cS2D;`nk$dJC%2&cwLoC~gAN0#R%i2(sF}>>Kw9%^ zZkLr-D$4cY9uFZqf0~NR`>_As;BgPRqlUYON2O;EG6XD^)c6oHXIa3o|K{;UQ zAp_?`_k;wV9e)OyGsTJ-SN4{ANxyJ<2gXUjv&uyz!)uo}ePhG9y4W@25;X@z<8FU|n$cd)eabJ@^a(%aRDDwBlN>!!Z%>+api4O_ zN1aF)S=y8L_>Rw!Fj1%WZT%x@DHJj2>&k{|V=4MeAV|WpO;VbuBqDS^m?tE{6s4gB zt(BGh0(^y0X(OcpR&3;%j=7Te?-|?bpK;(O0rOvRkl-S=>uS%;G+%s_JK46=VmM@s zM}_doioGD$RK<*?Hgd?Tx<;Vlt^vKDpk|_rM=w&yY-fwS>}^EaprflG>>g^oT=!rs zlwuhrQSNkW?d9e8IKQS>sO%9V)H^Vut~H~N znt#{(rRJfW9V@PrUg@;vwuGCmZm*AsOD?~IevQ2u28|FJ7c{ZoNO*x;pN9H~V`SB? zlmID@djZMcv)#4N%T4{da)E_~YLX|nXV#U8Oy{laKt4BE`m^q`hk_2ZtD9Bn=K2Uq zV~yru9qAi_AK)~Vqm3WApv~h8JlE;~qj`Q&MwS!jLv{TkV6DnKYQBpFkub+jdVjZ= z5Xo4<1Tq$>z3e~O&8M;1DIlF~YUhYIz)MFZBYXtY2`m4q6L5Y3??xw>0D(3)tjz$+ z+rxBth|-vd%tGKx8P+hlPwI)V_Y770Cjyh@C#J9Y=33)pWgxbYipzB1qv;>^i{u%6 zOMy-TjJBy@#4-ofX*I>xwguvQ;Gn+pwHfWcej>m$iPAgj&Cw^DBWRPevGFWD4|Dm# z&@pl3H3Pfj$K5+=Ij+J(W)PBd0AaEpG}2m?I}_;zr8YLX^t4&v-ze%~egXx8#zZ0B zMe3BYX;#z@vS^0^tgEU*ys={v%9co9q+W~Mr`;eWI~Yy&@b4pMk9k%K+HYZa9v>gs zFx{^8oLpZa&R(v@}!xX%2~d_?Tp>?nmRl`p-Q}n-`6YhyN=#-I$j*!t3KQ~ zlZe^S!tRRAP~*LEGVYXU^qnGqx}a*~41t!zk0uyDySj(-+|;Vo79BwH;VI8ez!&=ZImz{_!hZf2vqX>;NuYq%m0MXnv;Z zU>ucG$<1JkMU`DIeP5!`sq z7#xO*4m)EtlTkO^t2$;*q5c$mDe<@M_$GmVk7V7}M2n~_$atJAKNQf452nZh-^N=f zAb;l={W{lnPuV-A>^;CM7sF_O=QH)|CVOSjgI8Jad#H&F!|CLKTq~R)!l9t1At(cQLbfF;sVVFmu+wA!(huydn%0V(@NZ z&5!GJiBWS3^gn9@@Ymr2#jMI6)~`GIHdugb<UeTYRK zda?h|hgx;KFX%}{_C2e*`GKQbCrk|#CIa4F=#v~II9C@*O@z3)HRL=Os?QlB3Mm6y zhFvSJ#~O|{x`wulWEfoL$xH0=cGcizi#0&&8BZsd+UTS-w4)ieUUjmAO{BQgX*YWM z?c_@B=WJRf2RO&Lif4pRvA{b6GF0HqMU;=RW6hggC<@XBK5RmTIPpe<>C$N^VKIU7 z8>60~olucAK)ANXNdb1I7mBjsNZP{_#&GEf$}w?HkKwli$YnqFTYW<+AMTVMyy3a{ z*}OmQ8=sVdg+CGT25s;FLb%@(>D%+L8-epz`@8w{T@SaXBR6xi-$NB&(6)=aln#SMl;$1JdTd==AmJHfmFMr#5 z-gS6eYTkhUx5=UZba=OqyX)Y#()#OxRl(#HY+wINpWgLwyJX%dvfp+B)4w+!{;SXK zF1?-UZ))mqv&H*wIR2-?-d%h<>D?5A-}V{*=i+}T4R`(A&L}r2^0z&J1>{!3yu0u= f5B?f^ETX?nKg#k*Fs#7AVZvVQFfhjw-+cQI-v;;n literal 0 HcmV?d00001 diff --git a/examples/Apps_script_and_Workspace_codelab/Gemini-blog.txt b/examples/Apps_script_and_Workspace_codelab/Gemini-blog.txt new file mode 100644 index 000000000..bad10d626 --- /dev/null +++ b/examples/Apps_script_and_Workspace_codelab/Gemini-blog.txt @@ -0,0 +1,108 @@ +Our next-generation model: Gemini 1.5 +Feb 15, 2024 + +8 min read + +The model delivers dramatically enhanced performance, with a breakthrough in long-context understanding across modalities. + +Sundar Pichai +CEO of Google and Alphabet +Demis Hassabis +CEO of Google DeepMind + +A note from Google and Alphabet CEO Sundar Pichai: + +Last week, we rolled out our most capable model, Gemini 1.0 Ultra, and took a significant step forward in making Google products more helpful, starting with Gemini Advanced. Today, developers and Cloud customers can begin building with 1.0 Ultra too — with our Gemini API in AI Studio and in Vertex AI. + +Our teams continue pushing the frontiers of our latest models with safety at the core. They are making rapid progress. In fact, we’re ready to introduce the next generation: Gemini 1.5. It shows dramatic improvements across a number of dimensions and 1.5 Pro achieves comparable quality to 1.0 Ultra, while using less compute. 
+ +This new generation also delivers a breakthrough in long-context understanding. We’ve been able to significantly increase the amount of information our models can process — running up to 1 million tokens consistently, achieving the longest context window of any large-scale foundation model yet. + +Longer context windows show us the promise of what is possible. They will enable entirely new capabilities and help developers build much more useful models and applications. We’re excited to offer a limited preview of this experimental feature to developers and enterprise customers. Demis shares more on capabilities, safety and availability below. + +— Sundar + +Introducing Gemini 1.5 +By Demis Hassabis, CEO of Google DeepMind, on behalf of the Gemini team + +This is an exciting time for AI. New advances in the field have the potential to make AI more helpful for billions of people over the coming years. Since introducing Gemini 1.0, we’ve been testing, refining and enhancing its capabilities. + +Today, we’re announcing our next-generation model: Gemini 1.5. + +Gemini 1.5 delivers dramatically enhanced performance. It represents a step change in our approach, building upon research and engineering innovations across nearly every part of our foundation model development and infrastructure. This includes making Gemini 1.5 more efficient to train and serve, with a new Mixture-of-Experts (MoE) architecture. + +The first Gemini 1.5 model we’re releasing for early testing is Gemini 1.5 Pro. It’s a mid-size multimodal model, optimized for scaling across a wide-range of tasks, and performs at a similar level to 1.0 Ultra, our largest model to date. It also introduces a breakthrough experimental feature in long-context understanding. + +Gemini 1.5 Pro comes with a standard 128,000 token context window. But starting today, a limited group of developers and enterprise customers can try it with a context window of up to 1 million tokens via AI Studio and Vertex AI in private preview. + +As we roll out the full 1 million token context window, we’re actively working on optimizations to improve latency, reduce computational requirements and enhance the user experience. We’re excited for people to try this breakthrough capability, and we share more details on future availability below. + +These continued advances in our next-generation models will open up new possibilities for people, developers and enterprises to create, discover and build using AI. + +Context lengths of leading foundation models + +Highly efficient architecture +Gemini 1.5 is built upon our leading research on Transformer and MoE architecture. While a traditional Transformer functions as one large neural network, MoE models are divided into smaller "expert” neural networks. + +Depending on the type of input given, MoE models learn to selectively activate only the most relevant expert pathways in its neural network. This specialization massively enhances the model’s efficiency. Google has been an early adopter and pioneer of the MoE technique for deep learning through research such as Sparsely-Gated MoE, GShard-Transformer, Switch-Transformer, M4 and more. + +Our latest innovations in model architecture allow Gemini 1.5 to learn complex tasks more quickly and maintain quality, while being more efficient to train and serve. These efficiencies are helping our teams iterate, train and deliver more advanced versions of Gemini faster than ever before, and we’re working on further optimizations. 
+ +Greater context, more helpful capabilities +An AI model’s “context window” is made up of tokens, which are the building blocks used for processing information. Tokens can be entire parts or subsections of words, images, videos, audio or code. The bigger a model’s context window, the more information it can take in and process in a given prompt — making its output more consistent, relevant and useful. + +Through a series of machine learning innovations, we’ve increased 1.5 Pro’s context window capacity far beyond the original 32,000 tokens for Gemini 1.0. We can now run up to 1 million tokens in production. + +This means 1.5 Pro can process vast amounts of information in one go — including 1 hour of video, 11 hours of audio, codebases with over 30,000 lines of code or over 700,000 words. In our research, we’ve also successfully tested up to 10 million tokens. + +Complex reasoning about vast amounts of information +1.5 Pro can seamlessly analyze, classify and summarize large amounts of content within a given prompt. For example, when given the 402-page transcripts from Apollo 11’s mission to the moon, it can reason about conversations, events and details found across the document. + +Reasoning across a 402-page transcript: Gemini 1.5 Pro Demo +1:53 +Gemini 1.5 Pro can understand, reason about and identify curious details in the 402-page transcripts from Apollo 11’s mission to the moon. + +Better understanding and reasoning across modalities +1.5 Pro can perform highly-sophisticated understanding and reasoning tasks for different modalities, including video. For instance, when given a 44-minute silent Buster Keaton movie, the model can accurately analyze various plot points and events, and even reason about small details in the movie that could easily be missed. + +Multimodal prompting with a 44-minute movie: Gemini 1.5 Pro Demo +1:59 +Gemini 1.5 Pro can identify a scene in a 44-minute silent Buster Keaton movie when given a simple line drawing as reference material for a real-life object. + +Relevant problem-solving with longer blocks of code +1.5 Pro can perform more relevant problem-solving tasks across longer blocks of code. When given a prompt with more than 100,000 lines of code, it can better reason across examples, suggest helpful modifications and give explanations about how different parts of the code works. + +Problem solving across 100,633 lines of code | Gemini 1.5 Pro Demo +3:15 +Gemini 1.5 Pro can reason across 100,000 lines of code giving helpful solutions, modifications and explanations. + +Enhanced performance +When tested on a comprehensive panel of text, code, image, audio and video evaluations, 1.5 Pro outperforms 1.0 Pro on 87% of the benchmarks used for developing our large language models (LLMs). And when compared to 1.0 Ultra on the same benchmarks, it performs at a broadly similar level. + +Gemini 1.5 Pro maintains high levels of performance even as its context window increases. In the Needle In A Haystack (NIAH) evaluation, where a small piece of text containing a particular fact or statement is purposely placed within a long block of text, 1.5 Pro found the embedded text 99% of the time, in blocks of data as long as 1 million tokens. + +Gemini 1.5 Pro also shows impressive “in-context learning” skills, meaning that it can learn a new skill from information given in a long prompt, without needing additional fine-tuning. 
We tested this skill on the Machine Translation from One Book (MTOB) benchmark, which shows how well the model learns from information it’s never seen before. When given a grammar manual for Kalamang, a language with fewer than 200 speakers worldwide, the model learns to translate English to Kalamang at a similar level to a person learning from the same content.
+
+As 1.5 Pro’s long context window is the first of its kind among large-scale models, we’re continuously developing new evaluations and benchmarks for testing its novel capabilities.
+
+For more details, see our Gemini 1.5 Pro technical report.
+
+Extensive ethics and safety testing
+In line with our AI Principles and robust safety policies, we’re ensuring our models undergo extensive ethics and safety tests. We then integrate these research learnings into our governance processes and model development and evaluations to continuously improve our AI systems.
+
+Since introducing 1.0 Ultra in December, our teams have continued refining the model, making it safer for a wider release. We’ve also conducted novel research on safety risks and developed red-teaming techniques to test for a range of potential harms.
+
+In advance of releasing 1.5 Pro, we've taken the same approach to responsible deployment as we did for our Gemini 1.0 models, conducting extensive evaluations across areas including content safety and representational harms, and will continue to expand this testing. Beyond this, we’re developing further tests that account for the novel long-context capabilities of 1.5 Pro.
+
+Build and experiment with Gemini models
+We’re committed to bringing each new generation of Gemini models to billions of people, developers and enterprises around the world responsibly.
+
+Starting today, we’re offering a limited preview of 1.5 Pro to developers and enterprise customers via AI Studio and Vertex AI. Read more about this on our Google for Developers blog and Google Cloud blog.
+
+We’ll introduce 1.5 Pro with a standard 128,000 token context window when the model is ready for a wider release. Coming soon, we plan to introduce pricing tiers that start at the standard 128,000 context window and scale up to 1 million tokens, as we improve the model.
+
+Early testers can try the 1 million token context window at no cost during the testing period, though they should expect longer latency times with this experimental feature. Significant improvements in speed are also on the horizon.
+
+Developers interested in testing 1.5 Pro can sign up now in AI Studio, while enterprise customers can reach out to their Vertex AI account team.
+
+Learn more about Gemini’s capabilities and see how it works.
\ No newline at end of file
diff --git a/examples/Apps_script_and_Workspace_codelab/README.md b/examples/Apps_script_and_Workspace_codelab/README.md
new file mode 100644
index 000000000..8d3aa2e3e
--- /dev/null
+++ b/examples/Apps_script_and_Workspace_codelab/README.md
@@ -0,0 +1,10 @@
+# Gemini API and Google Workspace Codelab
+
+These are the final, accompanying files for the [Automate Google Workspace tasks with the Gemini API
+](https://codelabs.developers.google.com/codelabs/gemini-workspace) codelab. This codelab shows you how to
+call the Gemini API using Apps Script, and uses its function calling, vision and text capabilities to automate
+Google Workspace tasks.
+
+Please read and follow along with the main codelab, and if you get stuck you can load these files directly.
+
+This workshop was featured at [Google I/O 2024](https://io.google/2024/).
diff --git a/examples/Apps_script_and_Workspace_codelab/main.gs b/examples/Apps_script_and_Workspace_codelab/main.gs
new file mode 100644
index 000000000..02e8611db
--- /dev/null
+++ b/examples/Apps_script_and_Workspace_codelab/main.gs
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+function main() {
+  // const userQuery = "Set up a meeting at 5PM with Helen to discuss the news in the Gemini-1.5-blog.txt file.";
+  // const userQuery = "Draft an email for Mary with insights from the chart in the CollegeExpenses sheet.";
+  const userQuery = "Help me put together a deck about water conservation.";
+
+  var tool_use = callGeminiWithTools(userQuery, WORKSPACE_TOOLS);
+  Logger.log(tool_use);
+
+  if(tool_use['name'] == "setupMeeting") {
+    setupMeeting(tool_use['args']['time'], tool_use['args']['recipient'], tool_use['args']['filename']);
+    Logger.log("Your meeting has been set up.");
+  }
+  else if(tool_use['name'] == "draftEmail") {
+    draftEmail(tool_use['args']['sheet_name'], tool_use['args']['recipient']);
+    Logger.log("Check your Gmail to review the draft");
+  }
+  else if(tool_use['name'] == 'createDeck') {
+    const deckURL = createDeck(tool_use['args']['topic']);
+    Logger.log("Deck URL: " + deckURL);
+  }
+  else
+    Logger.log("no proper tool found");
+}
+
diff --git a/examples/Apps_script_and_Workspace_codelab/utils.gs b/examples/Apps_script_and_Workspace_codelab/utils.gs
new file mode 100644
index 000000000..0de0b5447
--- /dev/null
+++ b/examples/Apps_script_and_Workspace_codelab/utils.gs
@@ -0,0 +1,323 @@
+/**
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const properties = PropertiesService.getScriptProperties().getProperties();
+const geminiApiKey = properties['GOOGLE_API_KEY'];
+const geminiEndpoint = `https://generativelanguage.googleapis.com/v1beta/models/gemini-1.0-pro-latest:generateContent?key=${geminiApiKey}`;
+const geminiProVisionEndpoint = `https://generativelanguage.googleapis.com/v1beta/models/gemini-1.0-pro-vision-latest:generateContent?key=${geminiApiKey}`;
+
+const NUM_SLIDES = 3;
+
+const WORKSPACE_TOOLS = {
+  "function_declarations": [
+    {
+      "name": "setupMeeting",
+      "description": "Sets up a meeting in Google Calendar.",
+      "parameters": {
+        "type": "object",
+        "properties": {
+          "time": {
+            "type": "string",
+            "description": "The time of the meeting."
+          },
+          "recipient": {
+            "type": "string",
+            "description": "The name of the recipient."
+ }, + "filename": { + "type": "string", + "description": "The name of the file." + }, + }, + "required": [ + "time", + "recipient", + "filename" + ] + } + }, + { + "name": "draftEmail", + "description": "Write an email by analyzing data or charts in a Google Sheets file.", + "parameters": { + "type": "object", + "properties": { + "sheet_name": { + "type": "string", + "description": "The name of the sheet to analyze." + }, + "recipient": { + "type": "string", + "description": "The name of the recipient." + }, + }, + "required": [ + "sheet_name", + "recipient" + ] + } + }, + { + "name": "createDeck", + "description": "Build a simple presentation deck with Google Slides and return the URL.", + "parameters": { + "type": "object", + "properties": { + "topic": { + "type": "string", + "description": "The topic that the presentation is about." + }, + }, + "required": [ + "topic" + ] + } + }, + + // You add tools here. + ] +}; + +function callGemini(prompt, temperature=0) { + const payload = { + "contents": [ + { + "parts": [ + { + "text": prompt + }, + ] + } + ], + "generationConfig": { + "temperature": temperature, + }, + }; + + const options = { + 'method' : 'post', + 'contentType': 'application/json', + 'payload': JSON.stringify(payload) + }; + + const response = UrlFetchApp.fetch(geminiEndpoint, options); + const data = JSON.parse(response); + const content = data["candidates"][0]["content"]["parts"][0]["text"]; + return content; +} + +function testGemini() { + const prompt = "The best thing since sliced bread is"; + const output = callGemini(prompt); + console.log(prompt, output); +} + + +function callGeminiProVision(prompt, image, temperature=0) { + const imageData = Utilities.base64Encode(image.getAs('image/png').getBytes()); + + const payload = { + "contents": [ + { + "parts": [ + { + "text": prompt + }, + { + "inlineData": { + "mimeType": "image/png", + "data": imageData + } + } + ] + } + ], + "generationConfig": { + "temperature": temperature, + }, + }; + + const options = { + 'method' : 'post', + 'contentType': 'application/json', + 'payload': JSON.stringify(payload) + }; + + const response = UrlFetchApp.fetch(geminiProVisionEndpoint, options); + const data = JSON.parse(response); + const content = data["candidates"][0]["content"]["parts"][0]["text"]; + return content; +} + + +function testGeminiVision() { + const prompt = "Provide a fun fact about this object."; + const image = UrlFetchApp.fetch('https://storage.googleapis.com/generativeai-downloads/images/instrument.jpg').getBlob(); + const output = callGeminiProVision(prompt, image); + console.log(prompt, output); +} + +function callGeminiWithTools(prompt, tools, temperature=0) { + const payload = { + "contents": [ + { + "parts": [ + { + "text": prompt + }, + ] + } + ], + "tools" : tools, + "generationConfig": { + "temperature": temperature, + }, + }; + + const options = { + 'method' : 'post', + 'contentType': 'application/json', + 'payload': JSON.stringify(payload) + }; + + const response = UrlFetchApp.fetch(geminiEndpoint, options); + const data = JSON.parse(response); + const content = data["candidates"][0]["content"]["parts"][0]["functionCall"]; + return content; +} + +function testGeminiTools() { + const prompt = "Tell me how many days there are left in this month."; + const tools = { + "function_declarations": [ + { + "name": "datetime", + "description": "Returns the current date and time as a formatted string.", + "parameters": { + "type": "string" + } + } + ] + }; + const output = callGeminiWithTools(prompt, tools); + 
console.log(prompt, output); +} + +function attachFileToMeeting(event, file, fileName) { + // Get the iCal ID for the event. + const iCalEventId = event.getId(); + + // Log the ID and title for debugging. + console.log(`iCal event ID: ${iCalEventId}`); + console.log(`event Title: ${event.getTitle()}`); + + // Set up the options for listing the event with the advanced Google Calendar service. + const options = { + iCalUID: iCalEventId, + } + + // Use the primary calendar as the calendar ID to list events. + const calendarId = 'primary'; + + // Use the advanced Google Calendar service to list the event. + const calEvents = Calendar.Events.list(calendarId, options); + + // Get the Calendar ID used by the advanced Google Calendar service. + const eventId = calEvents.items[0].id; + + // Get the file URL for the attachment. + const fileUrl = file.getUrl(); + + // Set up the patch options to add the file. + var patch = { + attachments: [{ + 'fileUrl': fileUrl, + 'title': fileName + }] + } + + // Patch the event to add the file as an attachment. + Calendar.Events.patch(patch, 'primary', eventId, {"supportsAttachments": true}); +} + +function setupMeeting(time, recipient, filename) { + const files = DriveApp.getFilesByName(filename); + const file = files.next(); + const blogContent = file.getAs("text/*").getDataAsString() + + var geminiOutput = callGemini("Give me a really short title of this blog and a summary with less than three sentences. Please return the result as a JSON with two fields: title and summary. \n" + blogContent); + + // The Gemini model likes to enclose the JSON with ```json and ``` + geminiOutput = JSON.parse(geminiOutput.replace(/```(?:json|)/g, "")); + const title = geminiOutput['title'] + const fileSummary = geminiOutput['summary'] + + const event = CalendarApp.getDefaultCalendar().createEventFromDescription(`meet ${recipient} at ${time} to discuss "${title}"`); + event.setDescription(fileSummary); + attachFileToMeeting(event, file, filename) +} + +function draftEmail(sheet_name, recipient) { + + const prompt = `Compose the email body for ${recipient} with your insights for this chart. Use information in this chart only and do not do historical comparisons.`; + + var files = DriveApp.getFilesByName(sheet_name); + var sheet = SpreadsheetApp.openById(files.next().getId()).getSheetByName("Sheet1"); + var expenseChart = sheet.getCharts()[0]; + + var chartFile = DriveApp.createFile(expenseChart.getBlob().setName("ExpenseChart.png")) + var emailBody = callGeminiProVision(prompt, expenseChart) + GmailApp.createDraft(recipient+"@demo-email-provider.com", "College expenses", emailBody, { + attachments: [chartFile.getAs(MimeType.PNG)], + name: 'myname' + }); +} + +function createDeck(topic) { + const prompt = `I'm preparing a ${NUM_SLIDES}-slide deck to discuss ${topic}. Please help me brainstorm and generate main bullet points for each slide. Keep the title of each slide short. Please produce the result as a valid JSON so that I can pass it to other APIs.`; + + var geminiOutput = callGemini(prompt, 0.4); + // The Gemini model likes to enclose the JSON with ```json and ``` + geminiOutput = geminiOutput.replace(/```(?:json|)/g, ""); + const bulletPoints = JSON.parse(geminiOutput); + + // Create a Google Slides presentation. + const presentation = SlidesApp.create("My New Presentation"); + + // Set up the opening slide. 
+ var slide = presentation.getSlides()[0]; + var shapes = slide.getShapes(); + shapes[0].getText().setText(topic); + + var body; + for (var i = 0; i < NUM_SLIDES; i++) { + slide = presentation.appendSlide(SlidesApp.PredefinedLayout.TITLE_AND_BODY); + shapes = slide.getShapes(); + // Set title. + shapes[0].getText().setText(bulletPoints['slides'][i]['title']); + + // Set body. + body = ""; + for (var j = 0; j < bulletPoints['slides'][i]['bullets'].length; j++) { + // Logger.log(j); + body += '* ' + bulletPoints['slides'][i]['bullets'][j] + '\n'; + } + shapes[1].getText().setText(body); + } + + return presentation.getUrl(); +} +
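
Note for readers loading these files directly: utils.gs reads the Gemini API key from the `GOOGLE_API_KEY` script property. As a minimal sketch only (this helper is not part of the patch, and the function name and placeholder value are illustrative; the codelab's own setup instructions take precedence), you could store that property once from the Apps Script editor before running main():

function setGeminiApiKeyOnce() {
  // Hypothetical one-time setup helper: stores the key that utils.gs expects
  // under the 'GOOGLE_API_KEY' script property. Replace the placeholder with
  // your own key from AI Studio, run this function once, then remove it.
  PropertiesService.getScriptProperties().setProperty('GOOGLE_API_KEY', 'YOUR_API_KEY_HERE');
}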