tmmdev committed on
Commit
01e3cc0
·
verified ·
1 Parent(s): 667bcfc

Update pattern_analyzer.py

Browse files
Files changed (1) hide show
  1. pattern_analyzer.py +18 -10
pattern_analyzer.py CHANGED
@@ -1,19 +1,27 @@
1
  import os
2
  os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
- import numpy as np
5
- import pandas as pd
6
- import json
7
- from pattern_logic import PatternLogic
8
 
9
class PatternAnalyzer:
    """Wraps the tmmdev/codellama-pattern-analysis causal LM and its tokenizer."""

    def __init__(self):
        """Load the model and tokenizer from the Hugging Face Hub.

        8-bit quantization (bitsandbytes) requires a CUDA device; loading
        with ``load_in_8bit=True`` on a CPU-only host raises at load time.
        Probe for CUDA first and fall back to a plain float32 load when no
        GPU is available.
        """
        import torch  # local import: only needed for the availability probe

        if torch.cuda.is_available():
            self.model = AutoModelForCausalLM.from_pretrained(
                "tmmdev/codellama-pattern-analysis",
                load_in_8bit=True,   # Enable 8-bit quantization
                device_map="auto",   # Optimize device usage
                torch_dtype="auto",  # Automatic precision selection
            )
        else:
            # CPU fallback: no quantization available without CUDA.
            self.model = AutoModelForCausalLM.from_pretrained(
                "tmmdev/codellama-pattern-analysis",
                device_map="auto",
                torch_dtype=torch.float32,
                low_cpu_mem_usage=True,  # stream weights to limit peak RAM
            )

        self.tokenizer = AutoTokenizer.from_pretrained("tmmdev/codellama-pattern-analysis")
18
 
19
 
 
1
  import os
2
  os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
+ import torch
 
 
 
5
 
6
class PatternAnalyzer:
    """Wraps the tmmdev/codellama-pattern-analysis causal LM and its tokenizer."""

    def __init__(self):
        """Load the model (8-bit quantized on CUDA, float32 on CPU) and tokenizer.

        bitsandbytes 8-bit quantization only works on CUDA devices, so a
        CPU-only host gets an unquantized float32 load instead.
        """
        # Check if CUDA is available
        if torch.cuda.is_available():
            # Passing load_in_8bit directly to from_pretrained is deprecated
            # in recent transformers releases; the supported form is a
            # BitsAndBytesConfig passed as quantization_config.
            from transformers import BitsAndBytesConfig

            self.model = AutoModelForCausalLM.from_pretrained(
                "tmmdev/codellama-pattern-analysis",
                quantization_config=BitsAndBytesConfig(load_in_8bit=True),
                device_map="auto",
                torch_dtype="auto",
            )
        else:
            # CPU fallback configuration: no quantization without CUDA.
            self.model = AutoModelForCausalLM.from_pretrained(
                "tmmdev/codellama-pattern-analysis",
                device_map="auto",
                torch_dtype=torch.float32,
                low_cpu_mem_usage=True,  # stream weights to limit peak RAM
            )

        self.tokenizer = AutoTokenizer.from_pretrained("tmmdev/codellama-pattern-analysis")
26
 
27