---
license: apache-2.0
---

# Hugging Face format for Mobius Chat 12B 128k v4
|
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def generate_prompt(instruction, input=""):
    # Normalize line endings and collapse blank lines inside the fields.
    instruction = instruction.strip().replace('\r\n', '\n').replace('\n\n', '\n')
    input = input.strip().replace('\r\n', '\n').replace('\n\n', '\n')
    if input:
        return f"""Instruction: {instruction}

Input: {input}

Response:"""
    else:
        return f"""User: {instruction}

Assistant:"""

# Load in float16; use the commented-out bfloat16 variant if your GPU supports it.
# model = AutoModelForCausalLM.from_pretrained("TimeMobius/Mobius-Chat-12B-128k-HF", trust_remote_code=True, torch_dtype=torch.bfloat16).to(0)
model = AutoModelForCausalLM.from_pretrained("TimeMobius/Mobius-Chat-12B-128k-HF", trust_remote_code=True, torch_dtype=torch.float16).to(0)
tokenizer = AutoTokenizer.from_pretrained("TimeMobius/Mobius-Chat-12B-128k-HF", trust_remote_code=True)

text = "Write a beginning of sci-fi novel"
prompt = generate_prompt(text)

# Tokenize the prompt, move it to GPU 0, and sample a continuation.
inputs = tokenizer(prompt, return_tensors="pt").to(0)
output = model.generate(inputs["input_ids"], max_new_tokens=128, do_sample=True, temperature=1.0, top_p=0.3, top_k=0)
print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True))
```
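
For reference, `generate_prompt` yields one of two prompt layouts depending on whether an `input` string is supplied. A minimal sketch of both (the instruction and input strings below are made-up examples, not part of the model card):

```python
# With an input field: Instruction / Input / Response layout.
print(generate_prompt("Summarize the text", input="A short story about a Mars colony."))
# Instruction: Summarize the text
#
# Input: A short story about a Mars colony.
#
# Response:

# Without an input field: User / Assistant layout.
print(generate_prompt("Write a beginning of sci-fi novel"))
# User: Write a beginning of sci-fi novel
#
# Assistant:
```

Note that `model.generate` returns the prompt tokens followed by the newly generated tokens, so the final `print` in the main snippet echoes the prompt as well; to decode only the completion, slice off the prompt with `output[0][inputs["input_ids"].shape[1]:]` before calling `tokenizer.decode`.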