import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
import torch

# DeepSeek-Coder-derived model fine-tuned for code generation
MODEL = "m-a-p/OpenCodeInterpreter-DS-33B"

# DeepSeek-style Jinja chat template: "### Instruction:" / "### Response:"
# turns terminated by <|EOT|>; the built-in default system prompt is used
# only when the message list contains no system message
CHAT_TEMPLATE = "{%- set found_item = false -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set found_item = true -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if not found_item -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n"
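
# With the messages built by messages_for() below, a rendered prompt looks
# roughly like this (the template's default system text is skipped because
# an explicit system message is always supplied):
#
#   <system_message>
#   ### Instruction:
#   <user prompt containing the Python code>
#   ### Response: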

system_message = "You are a computer programmer who translates Python code to C++ in order to improve performance"

def user_prompt_for(python):
    return (
        "Rewrite this Python code in C++, aiming for the best possible performance. "
        "Format your response in Markdown. This is the Python code:\n\n"
        f"{python}"
    )

def messages_for(python):
    return [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt_for(python)},
    ]
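
# For illustration (hypothetical input), messages_for("print('hi')") yields
# the usual two-turn chat structure:
#   [{"role": "system", "content": system_message},
#    {"role": "user", "content": "Rewrite this Python code in C++ ..."}]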

tokenizer = AutoTokenizer.from_pretrained(MODEL)
tokenizer.chat_template = CHAT_TEMPLATE
model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.bfloat16, device_map="auto")
model.eval()

def translate(python):
    # tokenize=False makes apply_chat_template return a plain string,
    # so return_tensors has no effect here and is omitted
    formatted_prompt = tokenizer.apply_chat_template(
        messages_for(python),
        tokenize=False,
        add_generation_prompt=True,
    )
    inputs = tokenizer(formatted_prompt, return_tensors="pt", padding=True).to(model.device)

    # A TextIteratorStreamer can only be consumed once, so build a fresh one
    # per request; skip_prompt keeps the echoed prompt out of the output
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    generation_kwargs = dict(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=False,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )

    # Generate on a background thread so tokens can be consumed from the
    # streamer as they are produced
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    cplusplus = ""
    for chunk in streamer:
        cplusplus += chunk
        yield cplusplus
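
# Standalone usage sketch (hypothetical input, outside the Gradio UI);
# assumes enough GPU memory to host the 33B model:
#
#   sample = "def fib(n):\n    return n if n < 2 else fib(n - 1) + fib(n - 2)"
#   result = ""
#   for partial in translate(sample):
#       result = partial  # each yield is the full translation so far
#   print(result)
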
demo = gr.Interface(fn=translate, inputs="code", outputs="markdown")
demo.launch()
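
# On older Gradio releases, streaming generator outputs require the request
# queue to be enabled explicitly; if the output never updates, launch with:
#   demo.queue().launch()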