{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"c:\\Users\\ZZ029K826\\Documents\\GitHub\\llm-bill-chat-app\\.venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"model_name = \"ai-forever/mGPT-1.3B-romanian\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
"model = AutoModelForCausalLM.from_pretrained(model_name)\n"
]
},
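{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sanity check (added for illustration): encode and decode a short Romanian prompt to confirm the tokenizer round-trips text cleanly. The sample sentence below is an assumption, not part of the original notebook.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative tokenizer round-trip check; the sample sentence is an assumption\n",
"sample_text = \"Cât am de plată pe factura din această lună?\"\n",
"token_ids = tokenizer.encode(sample_text)\n",
"print(f\"{len(token_ids)} tokens: {token_ids[:10]}\")\n",
"print(tokenizer.decode(token_ids))\n"
]
},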
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Device set to use cpu\n"
]
}
],
"source": [
"chat_model = pipeline(\n",
" \"text-generation\",\n",
" model=model,\n",
" tokenizer=tokenizer,\n",
" device=-1, # Use CPU\n",
" max_length=300,\n",
" max_new_tokens=100,\n",
" truncation=True\n",
")\n"
]
},
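{
"cell_type": "markdown",
"metadata": {},
"source": [
"The warnings recorded in the next cell's output come from a run where `do_sample` was left at its default of `False` (so `temperature` and `top_p` were ignored) and where both `max_length` and `max_new_tokens` were set (the latter takes precedence). A minimal sketch, added for illustration, comparing greedy decoding with sampling on the same prompt:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: greedy vs. sampled decoding; do_sample=True is what makes temperature/top_p take effect\n",
"prompt = \"Cum te simți astăzi?\"\n",
"greedy = chat_model(prompt, do_sample=False, max_new_tokens=50)\n",
"sampled = chat_model(prompt, do_sample=True, temperature=0.7, top_p=0.9, max_new_tokens=50)\n",
"print(\"Greedy: \", greedy[0][\"generated_text\"])\n",
"print(\"Sampled:\", sampled[0][\"generated_text\"])\n"
]
},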
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"c:\\Users\\ZZ029K826\\Documents\\GitHub\\llm-bill-chat-app\\.venv\\Lib\\site-packages\\transformers\\generation\\configuration_utils.py:628: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.7` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`.\n",
" warnings.warn(\n",
"c:\\Users\\ZZ029K826\\Documents\\GitHub\\llm-bill-chat-app\\.venv\\Lib\\site-packages\\transformers\\generation\\configuration_utils.py:633: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.9` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`.\n",
" warnings.warn(\n",
"Both `max_new_tokens` (=100) and `max_length`(=300) seem to have been set. `max_new_tokens` will take precedence. Please refer to the documentation for more information. (https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cum te simți astăzi?\n",
"\n",
"- Nu mă mai gândesc la asta. - Ba da, sunt bine!\n",
"\n",
"Nu e nimic de făcut în legatura cu tata... dar nu vreau ca el sa se raneasca pe mine acolo unde este locul meu preferat: un apartament mare si frumoasa casa din New York City care are o proprietate privată pentru copiii lui Michael Jackson şi familia ei.\n",
"\n",
"\n"
]
}
],
"source": [
"def generate_response(user_input, temperature=0.7, top_k=50, top_p=0.9, repetition_penalty=1.2):\n",
" response = chat_model(\n",
" user_input,\n",
" max_new_tokens=100,\n",
" temperature=temperature,\n",
" top_k=top_k,\n",
" top_p=top_p,\n",
" repetition_penalty=repetition_penalty\n",
" )\n",
" return response[0]['generated_text']\n",
"\n",
"# Test the function with adjusted parameters\n",
"user_input = \"Cum te simți astăzi?\"\n",
"response = generate_response(user_input)\n",
"print(response)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}