michaelmc1618 committed on
Commit c78ffbd · verified · 1 Parent(s): cdda72d

Update app.py

Files changed (1)
  1. app.py +47 -15
app.py CHANGED
@@ -3,7 +3,7 @@ from huggingface_hub import InferenceClient
 from transformers import AutoModelForCausalLM, pipeline
 
 # Use a pipeline as a high-level helper
-pipe = pipeline("visual-question-answering", model="openbmb/MiniCPM-Llama3-V-2_5", trust_remote_code=True)
+pipe = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa", trust_remote_code=True)
 
 # Load model directly
 model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
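
Note: the hunk above swaps the VQA backbone from openbmb/MiniCPM-Llama3-V-2_5 to the much smaller dandelin/vilt-b32-finetuned-vqa. Neither side of the diff ever calls pipe, so the following is only a sketch of standard transformers VQA pipeline usage; the image path and question are invented for illustration, and trust_remote_code is unnecessary for ViLT (a stock transformers architecture), though harmless to keep.

from transformers import pipeline

# ViLT ships with stock transformers code, so trust_remote_code can be dropped.
pipe = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")

# Hypothetical call -- the committed app.py never invokes pipe directly.
result = pipe(image="photo.jpg", question="What is in the picture?")
print(result[0]["answer"], result[0]["score"])  # top answer with its confidence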
@@ -13,14 +13,21 @@ For more information on `huggingface_hub` Inference API support, please check th
 """
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
+def respond(message, history, system_message, max_tokens, temperature, top_p):
+    """
+    Generates a response based on the user message and chat history.
+
+    Args:
+        message (str): The user message.
+        history (list): A list of tuples containing user and assistant messages.
+        system_message (str): The system message.
+        max_tokens (int): Maximum number of tokens for the response.
+        temperature (float): Temperature for the response generation.
+        top_p (float): Top-p for nucleus sampling.
+
+    Yields:
+        str: The generated response.
+    """
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
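
Note: the hunks only show fragments of respond(). Pieced together from the context lines (the chat_completion call whose tail appears in the next hunk, and the token = message.choices[0].delta.content line), the full function presumably follows the stock huggingface_hub streaming pattern. A minimal sketch, assuming the zephyr endpoint accepts these sampling parameters:

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    # history is a list of (user, assistant) tuples, per the new docstring.
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # stream=True yields chunks; .choices[0].delta.content carries one token each.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""  # final chunk may be empty
        yield response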
@@ -41,29 +48,54 @@ def respond(
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
-
         response += token
         yield response
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-
 def process_video(video):
+    """
+    Processes the uploaded video file.
+
+    Args:
+        video (gr.Video): The uploaded video file.
+
+    Returns:
+        str: Confirmation message for the uploaded video.
+    """
     return f"Processing video: {video.name}"
 
 def process_pdf(pdf):
+    """
+    Processes the uploaded PDF file.
+
+    Args:
+        pdf (gr.File): The uploaded PDF file.
+
+    Returns:
+        str: Confirmation message for the uploaded PDF.
+    """
     return f"Processing PDF: {pdf.name}"
 
 def process_image(image):
+    """
+    Processes the uploaded image file.
+
+    Args:
+        image (gr.Image): The uploaded image file.
+
+    Returns:
+        str: Confirmation message for the uploaded image.
+    """
     return f"Processing image: {image.name}"
 
+# Define upload interfaces
 video_upload = gr.Interface(fn=process_video, inputs=gr.Video(), outputs="text", title="Upload a Video")
 pdf_upload = gr.Interface(fn=process_pdf, inputs=gr.File(file_types=['.pdf']), outputs="text", title="Upload a PDF")
 image_upload = gr.Interface(fn=process_image, inputs=gr.Image(), outputs="text", title="Upload an Image")
 
+# Combine upload interfaces into tabs
 tabbed_interface = gr.TabbedInterface([video_upload, pdf_upload, image_upload], ["Video", "PDF", "Image"])
 
+# Main Gradio interface
 demo = gr.Blocks()
 
 with demo:
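
Note: the new docstrings describe the arguments as gr.Video, gr.File, and gr.Image objects, but depending on the Gradio version the callback receives a plain filepath string instead, in which case .name raises AttributeError. A defensive variant (an assumption about runtime types, not something this commit changes):

import os

def process_video(video):
    # Gradio may hand the callback a file-like object or a bare filepath string.
    path = video if isinstance(video, str) else video.name
    return f"Processing video: {os.path.basename(path)}"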
@@ -73,7 +105,7 @@ with demo:
     additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),  # Corrected syntax error here
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
             minimum=0.1,
             maximum=1.0,
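
Note: neither side of the diff shows the app being started. The usual closing lines, assuming the standard Gradio entry point, would be:

if __name__ == "__main__":
    demo.launch()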
 