Satyam-Singh committed on
Commit
816807a
·
verified ·
1 Parent(s): f16a38d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -12
app.py CHANGED
@@ -1,9 +1,10 @@
1
- import gradio as gr
2
  import google.generativeai as genai
 
 
 
3
 
4
- genai.configure(api_key="KEY")
5
 
6
- # Set up the model
7
  generation_config = {
8
  "temperature": 0.9,
9
  "top_p": 1,
@@ -11,19 +12,30 @@ generation_config = {
11
  "max_output_tokens": 2048,
12
  }
13
 
14
- model = genai.GenerativeModel(model_name="gemini-pro",
15
- generation_config=generation_config)
 
 
 
16
 
17
  def chat(prompt):
18
  convo = model.start_chat(history=[])
19
  convo.send_message(prompt)
20
  return convo.last.text
21
 
22
- # Create the Gradio interface
23
- chat_interface = gr.Interface(
24
- fn=chat,
25
- inputs=gr.Textbox(lines=2, placeholder="Enter your prompt..."),
26
- outputs=gr.Textbox(lines=2, placeholder="Bot response..."),
27
- )
 
28
 
29
- chat_interface.launch()
 
 
 
 
 
 
 
 
 
1
  import google.generativeai as genai
2
+ import gradio as gr
3
+ import os
4
+ import random
5
 
6
+ genai.configure(api_key=os.getenv("KEY"))
7
 
 
8
  generation_config = {
9
  "temperature": 0.9,
10
  "top_p": 1,
 
12
  "max_output_tokens": 2048,
13
  }
14
 
15
+ emoji = ['1.png','2,png','3.png','4.png','5.png']
16
+
17
+ a = ramdom.choice(emoji)
18
+
19
+ model = genai.GenerativeModel(model_name="gemini-pro",generation_config=generation_config)
20
 
21
def chat(prompt):
    """Send *prompt* to the Gemini model in a fresh, history-free chat.

    Returns the text of the model's reply. Each call starts a new
    conversation, so no context is carried between prompts.
    """
    session = model.start_chat(history=[])
    session.send_message(prompt)
    return session.last.text
25
 
26
def generate(prompt, history):
    """ChatInterface callback: ignore *history* (stateless demo) and return the model reply.

    BUG FIX: the original had `generate` commented out inside a triple-quoted
    string while still passing `fn=generate` below, causing a NameError at import.
    """
    return chat(prompt)


llava = gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(
        show_label=False,
        # Random emoji avatar picked at module load; second image is the bot logo.
        avatar_images=(a, 'llava-logo.svg'),
        show_share_button=False,
        show_copy_button=True,
        likeable=True,
        layout="panel",
    ),
    title="LLaVa-2",
    description="This Is Official Demo Of ```LLaVa-2```. ```History/context``` memory does not work in this demo",
    concurrency_limit=20,  # layout="vertical", bubble_full_width=False
)
llava.launch(share=True, show_api=False)