import os

# Install the ctransformers dependency on first run
# (requests and tqdm, imported below, are assumed to be installed already)
os.system('pip install ctransformers')

import ctransformers
import time
import requests
from tqdm import tqdm


# Get the model file - you will need Expandable Storage to make this work
# (the quantized weights are several gigabytes)

if not os.path.isfile('llama-2-7b.ggmlv3.q4_K_S.bin'):
    print("Downloading Model from HuggingFace")
    url = "https://huggingface.co/TheBloke/Llama-2-7B-GGML/resolve/main/llama-2-7b.ggmlv3.q4_K_S.bin"
    response = requests.get(url, stream=True)
    response.raise_for_status()  # fail early instead of saving an HTML error page as the model
    total_size_in_bytes = int(response.headers.get('content-length', 0))
    block_size = 1024  # 1 Kibibyte
    progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
    with open('llama-2-7b.ggmlv3.q4_K_S.bin', 'wb') as file:
        for data in response.iter_content(block_size):
            progress_bar.update(len(data))
            file.write(data)
    progress_bar.close()
    if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
        print("ERROR: download incomplete - delete the .bin file and rerun")
  
# Set up ctransformers and load the Llama-2 model.
# 'stop' lists strings that end generation when the model emits them.

configObj = ctransformers.Config(stop=["\n", 'User'])
config = ctransformers.AutoConfig(config=configObj, model_type='llama')

llm = ctransformers.AutoModelForCausalLM.from_pretrained('./llama-2-7b.ggmlv3.q4_K_S.bin', config=config)
print("Loaded model")

# Decorator that reports how long the wrapped function took to run
def time_it(func):
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        execution_time = end_time - start_time
        print(f"Function '{func.__name__}' took {execution_time:.6f} seconds to execute.")
        return result
    return wrapper
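
# time_it is never applied below; as a usage sketch, decorating complete()
# would print its runtime automatically on every call:
#
#     @time_it
#     def complete(prompt):
#         ...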

def complete(prompt, stop=("User", "Assistant")):
    """Stream a completion token by token, stopping at any stop word."""
    tokens = llm.tokenize(prompt)
    token_count = 0
    output = ''
    for token in llm.generate(tokens):
        token_count += 1
        result = llm.detokenize(token)
        output += result
        # Cut generation off as soon as the model starts a new conversational turn
        for word in stop:
            if word in output:
                print('\n')
                return [output, token_count]
        print(result, end='', flush=True)

    print('\n')
    return [output, token_count]

# Simple REPL: read a question, stream the answer, then report throughput
while True:
    question = input("\nWhat is your question? > ")
    start_time = time.time()
    output, token_count = complete(f'User: {question}. Please answer this as informatively and concisely as possible.\nAssistant: ')
    end_time = time.time()
    execution_time = end_time - start_time
    print(f"{token_count} tokens generated in {execution_time:.6f} seconds.\n{token_count/execution_time:.2f} tokens per second")