RamAnanth1 committed on
Commit 34e528b · 1 Parent(s): 313437d

Update app.py

Files changed (1)
  1. app.py +7 -3
app.py CHANGED
@@ -291,7 +291,6 @@ class BLIPVQA:
 class ConversationBot:
     def __init__(self):
         print("Initializing VisualChatGPT")
-        self.llm = OpenAI(temperature=0)
         self.edit = ImageEditing(device="cuda")
         self.i2t = ImageCaptioning(device="cuda")
         self.t2i = T2I(device="cuda")
@@ -369,6 +368,9 @@ class ConversationBot:
             #description="useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. "
             #"The input to this tool should be a comma seperated string of two, representing the image_path and the user description")]
             ]
+
+    def init_langchain(self, api_key):
+        self.llm = OpenAI(temperature=0, openai_api_key=api_key)
         self.agent = initialize_agent(
             self.tools,
             self.llm,
@@ -377,6 +379,7 @@ class ConversationBot:
             memory=self.memory,
             return_intermediate_steps=True,
             agent_kwargs={'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions': VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX}, )
+        return gr.update(visible=True)
 
     def run_text(self, text, state):
         print("===============Running run_text =============")
@@ -417,12 +420,12 @@ class ConversationBot:
 
 bot = ConversationBot()
 with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
-
+    openai_api_key_input = gr.Textbox(type="password", label="Enter your OpenAI API key here")
     chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT")
     state = gr.State([])
 
 
-    with gr.Row(visible=True) as input_row:
+    with gr.Row(visible=False) as input_row:
         with gr.Column(scale=0.7):
             txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
         with gr.Column(scale=0.15, min_width=0):
@@ -430,6 +433,7 @@ with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
         with gr.Column(scale=0.15, min_width=0):
             btn = gr.UploadButton("Upload", file_types=["image"])
 
+    openai_api_key_input.submit(bot.init_langchain, openai_api_key_input, [input_row])
     txt.submit(bot.run_text, [txt, state], [chatbot, state])
     txt.submit(lambda: "", None, txt)
     btn.upload(bot.run_image, [btn, state, txt], [chatbot, state, txt])
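
For context, the change defers the LangChain/OpenAI setup until the user supplies an API key, and only then reveals the chat input row via gr.update(visible=True). The standalone sketch below illustrates that gating pattern in isolation; it assumes the Gradio 3.x Blocks API, and init_with_key and echo are hypothetical stand-ins for ConversationBot.init_langchain and run_text in app.py.

import gradio as gr

def init_with_key(api_key):
    # Stand-in for ConversationBot.init_langchain: app.py builds
    # OpenAI(temperature=0, openai_api_key=api_key) and initialize_agent(...) here.
    print(f"Received an API key of length {len(api_key)}")
    return gr.update(visible=True)  # reveal the hidden input row

def echo(text, history):
    # Stand-in for ConversationBot.run_text: append a stub reply to the chat history.
    history = history + [(text, f"echo: {text}")]
    return history, history

with gr.Blocks() as demo:
    key_box = gr.Textbox(type="password", label="Enter your OpenAI API key here")
    chatbot = gr.Chatbot()
    state = gr.State([])
    with gr.Row(visible=False) as input_row:  # hidden until a key is submitted
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter")
    key_box.submit(init_with_key, key_box, [input_row])
    txt.submit(echo, [txt, state], [chatbot, state])

demo.launch()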