Moha782 committed on
Commit 7052a9b · verified · 1 Parent(s): d789f01

Update app.py

Files changed (1): app.py +5 -50
app.py CHANGED
@@ -1,43 +1,15 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-from transformers import RagTokenizer, RagTokenForGeneration
-from typing import List, Dict, Tuple
-import re
-import os
-import torch
-from math import ceil
-
-# Load the RAG model and tokenizer
-rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
-rag_model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq")
-
-# Load your PDF document
-pdf_path = "apexcustoms.pdf"
-with open(pdf_path, 'rb') as f:
-    pdf_text = f.read().decode('utf-8', errors='ignore')
-
-# Split the PDF text into chunks
-split_pattern = r'\n\n'
-doc_chunks = re.split(split_pattern, pdf_text)
-
-# Preprocess the corpus
-corpus = rag_tokenizer(doc_chunks, return_tensors="pt", padding=True, truncation=True).input_ids
-
-# Pad the corpus to be a multiple of `n_docs`
-n_docs = rag_model.config.n_docs
-corpus_length = corpus.size(-1)
-pad_length = ceil(corpus_length / n_docs) * n_docs - corpus_length
-corpus = torch.nn.functional.pad(corpus, (0, pad_length), mode='constant', value=rag_model.config.pad_token_id)
 
 """
-For more information on huggingface_hub Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 def respond(
     message,
-    history: List[Tuple[str, str]],
+    history: list[tuple[str, str]],
     system_message,
     max_tokens,
     temperature,
@@ -53,22 +25,6 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    # Tokenize the input
-    inputs = rag_tokenizer(message, return_tensors="pt")
-    input_ids = inputs.pop("input_ids")
-
-    # Generate with the RAG model
-    output_ids = rag_model.generate(
-        input_ids=input_ids,
-        context_input_ids=corpus,
-        max_length=max_tokens,
-        do_sample=True,
-        top_p=top_p,
-        top_k=0,
-        num_beams=2,
-    )
-    retrieved_context = rag_tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
-
     response = ""
 
     for message in client.chat_completion(
@@ -77,7 +33,6 @@ def respond(
         stream=True,
         temperature=temperature,
         top_p=top_p,
-        context=retrieved_context,  # Include the retrieved context
     ):
         token = message.choices[0].delta.content
 
@@ -90,9 +45,9 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be creative and conversational in your responses. You should remember the user car model and tailor your answers accordingly. \n\nUser: ", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Textbox(value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be creative and conversational in your responses. You limit yourself to answering the given question and maybe propose a suggestion but not write the next question of the user. You should remember the user car model and tailor your answers accordingly. \n\nUser: ", label="System message"),
+        gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.3, step=0.1, label="Temperature"),
         gr.Slider(
             minimum=0.1,
             maximum=1.0,
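
A note on the corpus-padding step in the removed block: before calling rag_model.generate, the old code rounded the width of the tokenized corpus up to the next multiple of n_docs (rag_model.config.n_docs) with ceiling arithmetic. A minimal sketch of that formula in isolation; the concrete values below are hypothetical stand-ins, not taken from the app:

from math import ceil

# Hypothetical stand-ins for rag_model.config.n_docs and corpus.size(-1).
n_docs = 5
corpus_length = 1003

# Round corpus_length up to the next multiple of n_docs.
pad_length = ceil(corpus_length / n_docs) * n_docs - corpus_length
print(pad_length)  # 2, because ceil(1003 / 5) * 5 = 1005 and 1005 - 1003 = 2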
 
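Taken together, the hunks leave a plain hosted-inference chat app. Below is a minimal sketch of how app.py plausibly reads after this commit; the history-to-messages assembly, the None-token guard, the top_p slider defaults, and the launch call are assumptions filled in from the stock Gradio/Zephyr ChatInterface template, since those lines fall outside the hunks shown above:

import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the conversation in chat-completion message format
    # (assumed: this part of respond() sits outside the diff hunks).
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream tokens from the hosted model, yielding the growing reply so
    # the Gradio UI updates incrementally. The loop variable is renamed
    # from `message` (as in the diff) to `chunk` to avoid shadowing the
    # user argument.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # delta.content can be None on some stream chunks (assumed guard)
            response += token
        yield response


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a helpful car configuration assistant, ...",  # full Apex Customs prompt as in the diff, elided here
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.3, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,  # assumed: the diff shows only minimum/maximum for this slider
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()

Dropping the in-process facebook/rag-token-nq model also drops the torch and transformers dependencies at runtime; grounding now rests entirely on the system prompt and the hosted zephyr-7b-beta endpoint.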