nam194 committed on
Commit
455b92e
1 Parent(s): fa8f55a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +131 -0
app.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import streamlit as st
3
+ from langchain.llms import HuggingFaceHub
4
+ from models import return_sum_models
5
+
6
class LLM_Langchain():
    """Streamlit chat app that summarizes source code with a HuggingFace-hosted
    CodeT5 model, called through LangChain's ``HuggingFaceHub`` wrapper.

    Constructing an instance renders the page header and the sidebar controls
    (API key, language, model, sampling parameters) and stores the user's
    selections on the instance for later generation calls.
    """

    def __init__(self):
        # Page header and usage warnings.
        st.warning("Warning: Input function need to be clean and may take long to process")
        st.header('🦜 Code summarization with CodeT5-small + CodeXGLUE dataset')
        st.warning("Enter your huggingface API key first !")

        # HuggingFace API token, masked in the sidebar input.
        self.API_KEY = st.sidebar.text_input(
            'API Key',
            type='password',
            help="Type in your HuggingFace API key to use this app")

        languages = ["php", "java", "javascript", "python", "ruby"]
        model_parent = st.sidebar.selectbox(
            label="Choose Language",
            options=languages,
            help="Choose languages",
        )

        # Disable the model selector until a language is chosen, and keep
        # `options` defined on every path (fix: the original only assigned it
        # in the else branch, risking a NameError below).
        if model_parent is None:
            model_name_visibility = True
            options = []
        else:
            model_name_visibility = False
            options = return_sum_models(model_parent)

        self.model_name = st.sidebar.selectbox(
            label="Models",
            options=options,
            help="Chosen Model to predict",
            disabled=model_name_visibility
        )

        self.max_new_tokens = st.sidebar.slider(
            label="Token Length",
            min_value=32,
            max_value=1024,
            step=32,
            value=120,
            help="Set the max tokens to get accurate results"
        )

        self.top_k = st.sidebar.slider(
            label="top_k",
            min_value=1,
            max_value=50,
            step=1,
            value=30,
            help="Set the top_k"
        )

        # Fix: all numeric slider arguments must share one type; the original
        # mixed float bounds with the int `max_value=1`, which makes Streamlit
        # raise a StreamlitAPIException before the page can render.
        self.top_p = st.sidebar.slider(
            label="top_p",
            min_value=0.1,
            max_value=1.0,
            step=0.05,
            value=0.95,
            help="Set the top_p"
        )

        # Generation parameters forwarded verbatim to the hub endpoint.
        self.model_kwargs = {
            "max_new_tokens": self.max_new_tokens,
            "top_k": self.top_k,
            "top_p": self.top_p
        }

        # LangChain's HuggingFaceHub reads the token from this env var.
        os.environ['HUGGINGFACEHUB_API_TOKEN'] = self.API_KEY

    def generate_response(self, input_text):
        """Send ``input_text`` to the selected hub model and return its reply."""
        llm = HuggingFaceHub(
            repo_id=self.model_name,
            model_kwargs=self.model_kwargs
        )
        return llm(input_text)

    def form_data(self):
        """Render the chat history and process one round of user input.

        Typing "clear" wipes the conversation. Any exception (invalid key,
        network failure, hub-side error) is shown in the page instead of
        crashing the script.
        """
        try:
            # A HuggingFace token starts with "hf_"; until a plausible one is
            # entered, warn the user and keep the chat input disabled.
            have_key = bool(self.API_KEY) and self.API_KEY.startswith('hf_')
            if not have_key:
                st.warning('Please enter your API key!', icon='⚠')
            # Fix: `text_input_visibility` was referenced below but never
            # defined, so every render raised NameError (swallowed by the
            # except and shown as an error banner).
            text_input_visibility = not have_key

            if "messages" not in st.session_state:
                st.session_state.messages = []

            st.write(f"You are using {self.model_name} model")

            # Replay the conversation so far.
            for message in st.session_state.messages:
                with st.chat_message(message.get('role')):
                    st.write(message.get("content"))
            text = st.chat_input(disabled=text_input_visibility)

            if text:
                st.session_state.messages.append(
                    {
                        "role": "user",
                        "content": text
                    }
                )
                with st.chat_message("user"):
                    st.write(text)

                # Magic command: "clear" resets the whole conversation.
                if text.lower() == "clear":
                    del st.session_state.messages
                    return

                result = self.generate_response(text)
                st.session_state.messages.append(
                    {
                        "role": "assistant",
                        "content": result
                    }
                )
                with st.chat_message('assistant'):
                    st.markdown(result)

        except Exception as e:
            # Top-level UI boundary: surface the error in the page rather
            # than crashing the Streamlit script.
            st.error(e, icon="🚨")
130
# Script entry point: building the instance renders the sidebar/header,
# then form_data() drives the chat loop for this Streamlit rerun.
model = LLM_Langchain()
model.form_data()