Pankaj Munde committed on
Commit
33f66f9
·
1 Parent(s): 039a1df

Initial Commit.

Browse files
Files changed (2) hide show
  1. app.py +133 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 13 11:22:52 2024

@author: stinpankajm
"""
import os
import base64

from huggingface_hub import InferenceClient
import gradio as gr

# Hosted Inference API client for the Mistral-7B instruct model; all
# generation in this app goes through this single shared client.
# NOTE(review): `os` and `base64` appear unused by live code (base64 is only
# referenced by commented-out logo code below) — confirm before removing.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
14
+
15
# Builds a prompt that replays the whole conversation so far.
def format_prompt(message, history):
    """Build a Mistral-instruct prompt containing the full chat history.

    Args:
        message: The new user message to append as the final turn.
        history: Iterable of (user_prompt, bot_response) pairs from past turns.

    Returns:
        One prompt string of the form
        "<s>[INST] u1 [/INST] b1</s> [INST] u2 [/INST] ...".
    """
    turn = "[INST] {} [/INST]"
    pieces = ["<s>"]
    for past_user, past_bot in history:
        pieces.append(turn.format(past_user))
        pieces.append(f" {past_bot}</s> ")
    pieces.append(turn.format(message))
    return "".join(pieces)
27
+
28
# Path to a local fine-tuned checkpoint. NOTE(review): not referenced by any
# live code in this file (the app uses the hosted Inference API instead) —
# confirm it is needed before deleting.
MODEL_PATH = "/home/stinpankajm/workspace/FG_POCs/Insects_Scouting/Models/F_Scout_v0.2"

# Custom CSS injected into the Gradio Blocks layout below.
css = """
#warning {background-color: #FFCCCB}
#flag {color: red;}
#topHeading {
padding: 30px 0px 30px 15px;
box-shadow: 1px 0px 30px 0px rgba(0, 0, 0, 0.1);
}
#logoImg {
max-width: 260px;
}
"""
42
+
43
+
44
# Builds the advisor prompt; the same system preamble is prepended to every
# turn (historic and current), not just the latest one.
def format_prompt_finadvisor(message, history):
    """Build a Mistral-instruct prompt with an agriculture-advisor preamble.

    Each turn is wrapped as "[INST] <preamble> <text> [/INST]"; past bot
    replies are closed with "</s> \\n".

    Args:
        message: The new user message for the current turn.
        history: Iterable of (user_prompt, bot_response) pairs.

    Returns:
        The fully formatted prompt string starting with "<s>".
    """
    # Instruction text added before every prompt (preserved verbatim).
    prompt_prefix = """\
You are an agriculture expert providing advice to farmers and users.

Your task is to answer questions related to agriculture based on the Question provided below.

Do not provide any explanations and respond only with medium short answers, add bullet points whenever necessary..

Your TEXT to analyze:
"""
    turn_template = "[INST] " + prompt_prefix + " {} [/INST]"

    parts = ["<s>"]
    for past_user, past_bot in history:
        parts.append(turn_template.format(past_user))
        parts.append(f" {past_bot}</s> \n")
    parts.append(turn_template.format(message))
    return "".join(parts)
69
+
70
+
71
def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
    """Stream a completion for *prompt* from the hosted Mistral model.

    Yields the cumulative output after every generated token so the Gradio
    ChatInterface can render a live "typing" effect.

    Args:
        prompt: Latest user message.
        history: List of (user, bot) turn pairs supplied by ChatInterface.
        system_prompt: Extra instruction text prepended to the user message.
        temperature: Sampling temperature; clamped to at least 0.01.
        max_new_tokens: Cap on the number of generated tokens.
        top_p: Nucleus-sampling probability mass.
        repetition_penalty: Penalty applied to repeated tokens.

    Yields:
        The text generated so far (grows with each token).
    """
    # The API rejects non-positive temperatures, so clamp away from zero.
    temperature = max(float(temperature), 1e-2)

    sampling_options = {
        "temperature": temperature,
        "max_new_tokens": max_new_tokens,
        "top_p": float(top_p),
        "repetition_penalty": repetition_penalty,
        "do_sample": True,
        "seed": 42,  # fixed seed keeps generations reproducible
    }

    full_prompt = format_prompt_finadvisor(f"{system_prompt} {prompt}", history)

    token_stream = client.text_generation(
        full_prompt,
        **sampling_options,
        stream=True,
        details=True,
        return_full_text=False,
    )

    output = ""
    for chunk in token_stream:
        output += chunk.token.text
        yield output
    return output
90
+
91
# Extra ChatInterface controls: a system prompt box plus the sampling
# hyperparameters forwarded to `generate` in the same order.
additional_inputs = [
    gr.Textbox(
        label="System Prompt",
        value="",
        max_lines=1,
        interactive=True,
    ),
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1048,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
98
+
99
# Assemble and launch the UI. Fixes: removed commented-out dead code that
# contained hardcoded admin credentials (a secret leaked into source control —
# rotate them and use env vars if auth is reintroduced), and replaced no-op
# bare-string "section headers" with real comments.
with gr.Blocks(css=css) as demo:
    # --- Top custom header ------------------------------------------------
    with gr.Row(elem_id="topHeading"):
        with gr.Column():
            gr.Markdown(
                """
# FinAdvisor
""",
            )

    # --- Model prediction UI ----------------------------------------------
    # NOTE(review): launching from inside the Blocks context mirrors the
    # original behavior; `demo` itself is never launched directly.
    gr.ChatInterface(
        fn=generate,
        chatbot=gr.Chatbot(
            show_label=False,
            show_share_button=False,
            show_copy_button=True,
            likeable=True,
            layout="panel",
        ),
        additional_inputs=additional_inputs,
        title="AgExpert",
        examples=[],
    ).queue().launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ torch
3
+ transformers
4
+ datasets