Deepak Sahu committed on
Commit
33080cc
·
1 Parent(s): 22d61cc

first look

Browse files
Files changed (5) hide show
  1. .gitignore +2 -0
  2. .vscode/launch.json +23 -0
  3. README.md +19 -0
  4. app.py +71 -0
  5. z_generate.py +23 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ sb-rag/
2
+ *.pyc
.vscode/launch.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ // Use IntelliSense to learn about possible attributes.
3
+ // Hover to view descriptions of existing attributes.
4
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5
+ "version": "0.2.0",
6
+ "configurations": [
7
+
8
+ {
9
+ "name": "Python Debugger: Current File with Arguments",
10
+ "type": "debugpy",
11
+ "request": "launch",
12
+ "cwd": "${workspaceFolder}",
13
+ "program": "${workspaceFolder}/sb-rag/Scripts/gradio.exe",
14
+ "console": "integratedTerminal",
15
+ "args": [
16
+ "app.py"
17
+ ],
18
+ "env": {
19
+ "HF_SERVELESS_API": "WRITE THIS"
20
+ }
21
+ }
22
+ ]
23
+ }
README.md CHANGED
@@ -11,3 +11,22 @@ short_description: Just another rag but with Images 🖼️
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
14
+
15
+ ## Local Debug
16
+
17
+ Highly recommend VS Code, makes life easy.
18
+
19
+ 1. Create Virtual environment with name `sb-rag` using the below command
20
+ `python -m venv sb-rag`
21
+
22
+ 2. Activate the environment (open a new terminal in VS Code to have it activated automatically)
23
+
24
+ 3. Edit `.vscode/launch.json`. Fill in the environment variable `HF_SERVELESS_API` (sic — this spelling matches the name used in `launch.json` and `z_generate.py`).
25
+
26
+ 4. Start VS Code debugger.
27
+
28
+ ## References
29
+
30
+ 1. UI Blocks Concepts: https://huggingface.co/learn/nlp-course/en/chapter9/7
31
+ 2. UI Row-Column Arrangement: https://www.gradio.app/guides/controlling-layout
32
+ 3. Show caption in image gallery: https://github.com/gradio-app/gradio/issues/3364
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import gradio as gr
3
+ from z_generate import ServerlessInference
4
+
5
# STATIC TEXT DISPLAY
# Markdown blurb rendered at the top of the app.
TXT_APP_DESCRIPTION = '''
Just another Retrieval Augmented Generation that also retrieves images
'''
# Markdown list of the corpus documents, shown on the "Source Documents" tab.
# Fix: third entry was mis-numbered "2." (duplicate of the previous item).
TXT_SOURCE_DOC_DESCRIPTION = '''
Manually Downloaded as HTML files:

1. https://en.wikipedia.org/wiki/MS_Dhoni
2. https://en.wikipedia.org/wiki/Jharkhand
3. https://en.wikipedia.org/wiki/Cricket_World_Cup
'''
16
+
17
+
18
# UI Interface
# Top-level Gradio Blocks container; the layout is attached later via `with demo:`.
demo = gr.Blocks()

# Single shared inference client (wraps huggingface_hub's InferenceClient)
# used by all event handlers below.
llm = ServerlessInference()
22
+
23
# Processing Functions
def update_response(query:str = "something"):
    """Send the user's query to the LLM and return its text reply."""
    answer = llm.test(query)
    return answer
26
+
27
def update_gallery(text:str = "hell"):
    """Return placeholder (image_url, caption) pairs for the gallery.

    The query text is currently ignored — image retrieval is not wired in yet.
    """
    return [
        ("http://www.marketingtool.online/en/face-generator/img/faces/avatar-1151ce9f4b2043de0d2e3b7826127998.jpg", "Some Description"),
        ("http://www.marketingtool.online/en/face-generator/img/faces/avatar-116b5e92936b766b7fdfc242649337f7.jpg", "Another Description"),
    ]
33
+
34
+
35
def ask_bot(text):
    """Click handler: produce the bot's text answer and the gallery images."""
    answer = update_response(text)
    images = update_gallery(text)
    return answer, images
37
+
38
# UI Layout
# Two tabs: the chat interface ("Ask Bot") and a static description of the
# indexed documents ("Source Documents").
with demo:
    gr.Markdown(TXT_APP_DESCRIPTION)

    with gr.Tabs():
        with gr.TabItem("Ask Bot"):
            # Query row: wide textbox + narrow submit button.
            with gr.Row(equal_height=True):
                with gr.Column(scale=3):
                    # Fix: label/placeholder typos ("You query here",
                    # "crickter", "did Dhoni held").
                    text_input = gr.Textbox(
                        label="Your query here",
                        placeholder="What positions apart from cricketer did Dhoni hold?"
                    )
                with gr.Column(scale=1):
                    btn_generate = gr.Button("Generate Answer")

            # Response row: text answer on the left, image gallery on the right.
            with gr.Row():
                with gr.Column(scale=3):
                    text_output = gr.Textbox(label="Bot Response:", placeholder="Type in Query before I could answer")

                with gr.Column(scale=2):
                    gallery = gr.Gallery(
                        label="Generated images", show_label=False, elem_id="gallery",
                        columns=[3], rows=[1], object_fit="contain", height="auto"
                    )

            # Wire the button: one input textbox, two outputs (text + gallery).
            btn_generate.click(ask_bot, text_input, outputs=[text_output, gallery])

        ####
        with gr.TabItem("Source Documents"):
            gr.Markdown(TXT_SOURCE_DOC_DESCRIPTION)

demo.launch(debug=True)
z_generate.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from huggingface_hub import InferenceClient
2
+ import os
3
+
4
class ServerlessInference:
    """Thin wrapper around huggingface_hub's InferenceClient chat endpoint."""

    def __init__(self):
        # Chat model used for all completions.
        self.model:str = "HuggingFaceH4/zephyr-7b-beta"
        # NOTE(review): "HF_SERVELESS_API" looks misspelled ("SERVERLESS"?),
        # but it matches the name set in .vscode/launch.json — rename both
        # together if fixing.
        self.client = InferenceClient(api_key=os.getenv("HF_SERVELESS_API"))

    def test(self, query:str) -> str:
        '''Responds to query using llm'''
        # Fix: annotation was `str`; this is a list of chat-message dicts.
        messages:list = [
            {
                "role": "user",
                "content": query
            }
        ]
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            max_tokens=500
        )

        # Single-choice response; return just the generated text.
        return completion.choices[0].message.content