import streamlit as st
from transformers import BloomTokenizerFast, BloomForCausalLM, pipeline
# Arabic prompt: "Write an article of a few lines about artificial intelligence and its developments"
text = "اكتب مقالا من عدة أسطر عن الذكاء الصناعي وتطوراته"
prompt = f'Instruction:\n{text}\n\nResponse:'
# Load the Noon-7B model (BLOOM-based) and its tokenizer from the Hugging Face Hub
model = BloomForCausalLM.from_pretrained('Naseej/noon-7b')
tokenizer = BloomTokenizerFast.from_pretrained('Naseej/noon-7b')
generation_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
# We recommend the provided hyperparameters for generation
# But encourage you to try different values
response = generation_pipeline(prompt,
                               pad_token_id=tokenizer.eos_token_id,
                               do_sample=False,
                               num_beams=4,
                               max_length=500,
                               top_p=0.1,
                               top_k=20,
                               repetition_penalty=3.0,
                               no_repeat_ngram_size=3)[0]['generated_text']
# print(response)
st.write(response)
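
# A minimal sketch of how this script can be run locally, assuming the
# dependencies (streamlit, transformers, torch) are installed and there is
# enough memory to load the ~7B-parameter Naseej/noon-7b checkpoint:
#
#   pip install streamlit transformers torch
#   streamlit run app.py
#
# The file name app.py is an assumption; use whatever name this script is saved under.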