michaelmc1618 committed
Commit d47ec49 · verified · 1 Parent(s): 4ee06a7

Update app.py

Files changed (1):
  1. app.py +0 -50
app.py CHANGED
@@ -8,38 +8,18 @@ pipe = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-
 # Load model directly
 model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 def respond(message, history, system_message, max_tokens, temperature, top_p):
-    """
-    Generates a response based on the user message and chat history.
-
-    Args:
-        message (str): The user message.
-        history (list): A list of tuples containing user and assistant messages.
-        system_message (str): The system message.
-        max_tokens (int): Maximum number of tokens for the response.
-        temperature (float): Temperature for the response generation.
-        top_p (float): Top-p for nucleus sampling.
-
-    Yields:
-        str: The generated response.
-    """
     messages = [{"role": "system", "content": system_message}]
-
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
-
     messages.append({"role": "user", "content": message})
 
     response = ""
-
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -52,50 +32,20 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
         yield response
 
 def process_video(video):
-    """
-    Processes the uploaded video file.
-
-    Args:
-        video (gr.Video): The uploaded video file.
-
-    Returns:
-        str: Confirmation message for the uploaded video.
-    """
     return f"Processing video: {video.name}"
 
 def process_pdf(pdf):
-    """
-    Processes the uploaded PDF file.
-
-    Args:
-        pdf (gr.File): The uploaded PDF file.
-
-    Returns:
-        str: Confirmation message for the uploaded PDF.
-    """
     return f"Processing PDF: {pdf.name}"
 
 def process_image(image):
-    """
-    Processes the uploaded image file.
-
-    Args:
-        image (gr.Image): The uploaded image file.
-
-    Returns:
-        str: Confirmation message for the uploaded image.
-    """
     return f"Processing image: {image.name}"
 
-# Define upload interfaces
 video_upload = gr.Interface(fn=process_video, inputs=gr.Video(), outputs="text", title="Upload a Video")
 pdf_upload = gr.Interface(fn=process_pdf, inputs=gr.File(file_types=['.pdf']), outputs="text", title="Upload a PDF")
 image_upload = gr.Interface(fn=process_image, inputs=gr.Image(), outputs="text", title="Upload an Image")
 
-# Combine upload interfaces into tabs
 tabbed_interface = gr.TabbedInterface([video_upload, pdf_upload, image_upload], ["Video", "PDF", "Image"])
 
-# Main Gradio interface
 demo = gr.Blocks()
 
 with demo:
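
The hunk headers skip old lines 1-7 (the import block) and 46-51 (the tail of the chat_completion call and the body of the streaming loop). For orientation, here is a minimal sketch of what those elided pieces presumably look like, assuming the standard huggingface_hub streaming pattern; the exact import list, the stream=True flag, and the chunk field layout are inferences from the names used in the diff, not something the commit records.

    # Sketch only -- not part of the commit. The imports cover the names
    # the diff uses; the loop body assumes huggingface_hub's streaming API.
    import gradio as gr
    from huggingface_hub import InferenceClient
    from transformers import pipeline, AutoModelForCausalLM

    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

    def respond(message, history, system_message, max_tokens, temperature, top_p):
        messages = [{"role": "system", "content": system_message}]
        for user_turn, assistant_turn in history:
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if assistant_turn:
                messages.append({"role": "assistant", "content": assistant_turn})
        messages.append({"role": "user", "content": message})

        response = ""
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,                 # assumed: required for incremental yields
            temperature=temperature,
            top_p=top_p,
        ):
            token = chunk.choices[0].delta.content  # assumed chunk layout
            if token:
                response += token
                yield response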
 
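The diff stops at `with demo:`, so the body of the Blocks context falls outside both hunks. One plausible wiring, consistent with the signatures shown above, is sketched below; every component, label, and default value here is an assumption rather than something the commit reveals.

    # Sketch only -- the commit does not show the Blocks body.
    chat = gr.ChatInterface(
        respond,
        additional_inputs=[
            gr.Textbox(value="You are a helpful assistant.", label="System message"),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max tokens"),
            gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
        ],
    )

    with demo:
        tabbed_interface.render()  # a TabbedInterface is a Blocks, so render() embeds it
        chat.render()

    demo.launch()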