namannn committed
Commit 82a2d2f · verified · 1 Parent(s): 4bc3aed

Update app.py

Files changed (1): app.py (+37 -11)
app.py CHANGED
@@ -1,16 +1,42 @@
  import streamlit as st
- from transformers import pipeline

- sentiment_pipeline = pipeline("sentiment-analysis")

- st.title("Sentiment Analysis with HuggingFace Spaces")
- st.write("Enter a sentence to analyze its sentiment:")

- user_input = st.text_input("")
- if user_input:
-     result = sentiment_pipeline(user_input)
-     sentiment = result[0]["label"]
-     confidence = result[0]["score"]

-     st.write(f"Sentiment: {sentiment}")
-     st.write(f"Confidence: {confidence:.2f}")
 
+ from transformers import AutoTokenizer, AutoModelForCausalLM

+ # Load model and tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("namannn/llama2-13b-hyperbolic-cluster-pruned")
+ model = AutoModelForCausalLM.from_pretrained("namannn/llama2-13b-hyperbolic-cluster-pruned")

+ # Streamlit UI components
+ st.title("Text Generation with LLaMa2-13b Hyperbolic Model")
+ st.write("Enter a prompt below and the model will generate text.")

+ # User input for prompt
+ prompt = st.text_area("Input Prompt", "Once upon a time, in a land far away")
+
+ # Slider for controlling the length of the output
+ max_length = st.slider("Max Length of Generated Text", min_value=50, max_value=200, value=100)
+
+ # Button to trigger text generation
+ if st.button("Generate Text"):
+     if prompt:
+         # Encode the prompt text
+         inputs = tokenizer(prompt, return_tensors="pt")
+
+         # Generate text with the model
+         outputs = model.generate(
+             inputs["input_ids"],
+             max_length=max_length,
+             num_return_sequences=1,
+             no_repeat_ngram_size=2,  # You can tune this for diversity
+             do_sample=True,          # Use sampling for diverse generation
+             top_k=50,                # Top-k sampling for diversity
+             top_p=0.95,              # Top-p (nucleus) sampling
+             temperature=0.7          # Control randomness (lower = more deterministic)
+         )
+
+         # Decode and display generated text
+         generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+         st.subheader("Generated Text:")
+         st.write(generated_text)
+     else:
+         st.warning("Please enter a prompt to generate text.")
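
A note on the new version: both from_pretrained calls run at the top of the script, and Streamlit re-executes the whole script on every widget interaction, so a 13B checkpoint would be reloaded on every rerun. A minimal sketch of caching the load with Streamlit's st.cache_resource decorator; the load_model helper name is hypothetical and not part of this commit:

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "namannn/llama2-13b-hyperbolic-cluster-pruned"

# Cache the load across Streamlit reruns: the checkpoint is read once
# per process instead of on every widget interaction.
@st.cache_resource
def load_model(model_id):  # hypothetical helper, not in this commit
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return tokenizer, model

tokenizer, model = load_model(MODEL_ID)

st.cache_resource is intended for unserializable resources like models and tokenizers, so the same objects are shared across reruns instead of being rebuilt each time.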
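On the generate() call itself: max_length bounds the prompt plus the continuation, so a long prompt can exhaust most of the budget, and calling generate() without an attention mask makes Transformers emit a warning. A sketch of the same call using the standard max_new_tokens and attention_mask arguments, reusing prompt, max_length, tokenizer, and model from the app above:

# Encode the prompt and pass the attention mask explicitly.
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],  # avoid the missing-mask warning
    max_new_tokens=max_length,  # bounds only the newly generated tokens
    do_sample=True,
    top_k=50,
    top_p=0.95,
    temperature=0.7,
    no_repeat_ngram_size=2,
)
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)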
42