xavierbarbier committed on
Commit ffbaa0d
1 Parent(s): 8410290

Update app.py

Files changed (1)
  1. app.py +8 -2
app.py CHANGED
@@ -8,6 +8,7 @@ import numpy as np
 from pypdf import PdfReader
 from gradio_pdf import PDF
 from transformers import pipeline
+from transformers_js import import_transformers_js
 
 
 title = "Mistral-7B-Instruct-GGUF Run On CPU-Basic Free Hardware"
@@ -29,7 +30,7 @@ model_name = "SmolLM-1.7B-Instruct.Q2_K.gguf"
 hf_hub_download(repo_id="mradermacher/SmolLM-1.7B-Instruct-GGUF", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)
 """
 
-
+"""
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
@@ -37,7 +38,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 model_name = "croissantllm/CroissantLLMBase"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
-
+"""
 print("Start the model init process")
 """model = model = GPT4All(model_name, model_path, allow_download = False, device="cpu")
 
@@ -48,6 +49,11 @@ model._is_chat_session_activated = False
 
 max_new_tokens = 2048"""
 
+transformers = await import_transformers_js()
+pipeline = transformers.pipeline
+
+pipe = pipeline('text-generation', 'Xenova/distilgpt2')
+
 model_kwargs = {'device': 'cpu'}
 encode_kwargs = {'normalize_embeddings': False}
 embeddings = HuggingFaceEmbeddings(
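
For context on the new code path: import_transformers_js loads transformers.js through a Pyodide bridge, which only works in a browser runtime such as Gradio-Lite (top-level await is valid there). A minimal sketch of the intended usage, assuming the transformers_js_py-style API in which both pipeline(...) and the returned pipe are awaitable; note that the committed line pipe = pipeline(...) omits the await this API would normally require:

# Sketch only: assumes a Pyodide/Gradio-Lite runtime where top-level
# await is allowed, and the transformers_js_py-style bridge API.
from transformers_js import import_transformers_js

transformers = await import_transformers_js()  # load transformers.js
pipeline = transformers.pipeline

# In the bridge API, pipeline() returns an awaitable, so the pipe is
# normally created with await (the commit leaves it off):
pipe = await pipeline('text-generation', 'Xenova/distilgpt2')

# Calling the pipe is awaitable as well; generation options are passed
# as a dict (parameter names assumed to follow transformers.js).
out = await pipe('Once upon a time', {'max_new_tokens': 32})
print(out)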
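
The last hunk cuts off mid-call, so the arguments to HuggingFaceEmbeddings( are not shown. For reference, a typical construction with the kwargs from the diff (the import path and model name below are illustrative assumptions, not taken from the commit):

# Illustrative sketch; the actual model_name argument lies outside the
# diff context above.
from langchain_community.embeddings import HuggingFaceEmbeddings

model_kwargs = {'device': 'cpu'}                 # run on CPU
encode_kwargs = {'normalize_embeddings': False}  # keep raw vectors

embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2",  # assumed
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)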