|
import pyttsx3 |
|
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer |
|
|
|
|
|
# Hugging Face model id for the base LLaMA-7B checkpoint.
# NOTE(review): the "decapoda-research" repos were removed from the Hub —
# this download will likely fail; confirm and switch to a maintained mirror.
model_name = "decapoda-research/llama-7b-hf"

# Loading a 7B model downloads tens of GB and needs substantial RAM/VRAM;
# both load eagerly at import time.
model = AutoModelForCausalLM.from_pretrained(model_name)

tokenizer = AutoTokenizer.from_pretrained(model_name)




# Offline text-to-speech engine (uses the platform's native voice backend).
tts_engine = pyttsx3.init()
|
|
|
def generate_text(prompt, max_length=50):
    """Generate a text continuation of *prompt* with the module-level model.

    Args:
        prompt: Seed string fed to the language model.
        max_length: Maximum total token length (prompt + continuation) of the
            generated sequence. Defaults to 50, matching the original behavior.

    Returns:
        The decoded generated text, special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pass the attention mask explicitly: without it, generate() has to guess
    # which tokens are padding and emits a warning (and can misbehave when a
    # pad token is present in the input).
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
|
|
|
def text_to_speech(text):
    """Speak *text* aloud through the shared pyttsx3 engine (blocking)."""
    engine = tts_engine
    engine.say(text)
    # runAndWait() processes the queued utterance and returns once playback
    # has finished.
    engine.runAndWait()
|
|
|
def main():
    """Generate a continuation of a fixed prompt, print it, and speak it."""
    story = generate_text("Once upon a time")
    print(f"Generated Text: {story}")
    text_to_speech(story)
|
|
|
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":

    main()
|
|