jhansi1 committed on
Commit
c500dff
·
verified ·
1 Parent(s): 81ca32d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -29
app.py CHANGED
@@ -1,29 +1,59 @@
1
- import streamlit as st
2
- from transformers import pipeline
3
-
4
- # Load the fill-mask pipeline with the specified model
5
- @st.cache_resource
6
- def load_pipeline():
7
- """Load the ParlBERT model for the fill-mask task."""
8
- pipe = pipeline("fill-mask", model="InfAI/parlbert-german-law")
9
- return pipe
10
-
11
- # Load the pipeline
12
- pipe = load_pipeline()
13
-
14
- # Streamlit app UI
15
- st.title("ParlBERT German Law Fill-in-the-Blank Assistant")
16
- st.write("Enter a sentence with a `[MASK]` token, and the model will predict possible words.")
17
-
18
- # Input text area for the sentence with a default example
19
- user_input = st.text_input("Your sentence (use [MASK] as a placeholder):", "Das Gesetz [MASK] den Bürger.")
20
-
21
- # Generate predictions when the button is clicked
22
- if st.button("Generate Prediction"):
23
- # Run the model on the input sentence
24
- predictions = pipe(user_input)
25
-
26
- # Display predictions
27
- st.write("Predicted Fill-ins:")
28
- for i, pred in enumerate(predictions):
29
- st.write(f"{i+1}. {pred['sequence']} (Score: {pred['score']:.4f})")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+
4
+ # Initialize the InferenceClient with the model 'InfAI/parlbert-german-law'
5
+ client = InferenceClient("InfAI/parlbert-german-law")
6
+
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, yielding the growing response.

    Args:
        message: Latest user message.
        history: Prior (user, assistant) turn pairs; empty entries are skipped.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The accumulated assistant response after each streamed chunk.
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Loop variable renamed from 'message' — the original shadowed the
    # 'message' parameter inside the loop.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas may carry content=None (e.g. role-only or final
        # chunks); guard so 'response += token' never raises TypeError.
        if token is not None:
            response += token
        yield response
# Chat UI wiring: the extra widgets map positionally onto respond()'s
# system_message / max_tokens / temperature / top_p parameters.
_extra_controls = [
    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
    ),
]

demo = gr.ChatInterface(respond, additional_inputs=_extra_controls)

# Launch the app only when executed as a script (not when imported).
if __name__ == "__main__":
    demo.launch()