File size: 1,513 Bytes
7979451
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a2f30b5
 
4a97976
 
7979451
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


# Load the CodeLlama Python model and its tokenizer once at startup.
# NOTE(review): this downloads a ~7B-parameter checkpoint on first run.
model_name = "codellama/CodeLlama-7b-Python-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Prefer GPU when available; fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device('cpu')
model = model.to(device)

# Page heading for the Streamlit app.
st.markdown("### Python Code Helper")

# Prompt form: collect a prompt, run the causal LM, and show the decoded text.
with st.form(key="myForm"):
    prompt = st.text_area("Enter your Prompt")
    submit = st.form_submit_button("Submit", type='primary')
    if submit and prompt:
        with st.spinner("Generating Response"):
            # AutoModelForCausalLM has no `.invoke()` (that is a LangChain
            # API); the correct flow is tokenize -> generate -> decode.
            inputs = tokenizer(prompt, return_tensors="pt").to(device)
            # max_new_tokens bounds the reply length; without it generate()
            # falls back to a very small default budget.
            output_ids = model.generate(**inputs, max_new_tokens=256)
            response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
            st.write(response)