---
license: apache-2.0
---

# Inference
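The fine-tuned model turns a natural-language question about a `scans` table into a SQL query. First, load the tokenizer and model:
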
```python
import time

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned GPT-2 medium model and its tokenizer from the Hub
tokenizer = AutoTokenizer.from_pretrained("Mr-Vicky-01/gpt2-medium-Fintuned")
model = AutoModelForCausalLM.from_pretrained("Mr-Vicky-01/gpt2-medium-Fintuned")
```

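Next, build the Alpaca-style prompt that embeds the `scans` table schema, fill in the user's question, and generate the SQL query:
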
```python
BOS_TOKEN = "<sos>"
alpaca_prompt = BOS_TOKEN + """You are an AI specialized in generating SQL queries. Your task is to provide SQL queries based on the given instruction and input.

### Instruction:

The schema for the scans table is as follows:

org_name: Organization name
group_name: Group name
project_name: Project name
git_url: Repo URL
public: Boolean (True or False)
frequency: Scan frequency (e.g., Once, Daily, Weekly, Monthly, Hourly)
status: Scan status (e.g., COMPLETED, RUNNING, SCANNING, FAILED, CLONING, CLOCING)
created_at: Timestamp (DD-MM-YYYY HH:MM)
total_vulns: Number of vulnerabilities
line_of_codes: Lines of code scanned
files_scanned: Files scanned
total_sast_findings: SAST scan vulnerabilities
total_exec_time_sast: SAST scan execution time (seconds)
total_secret_findings: Secret scan vulnerabilities
total_exec_time_secret: Secret scan execution time (seconds)
total_pii_findings: PII scan vulnerabilities
total_exec_time_pii: PII scan execution time (seconds)
total_sca_findings: SCA scan vulnerabilities
total_exec_time_sca: SCA scan execution time (seconds)
total_container_findings: Container scan vulnerabilities
total_exec_time_container: Container scan execution time (seconds)
total_malware_findings: Malware scan vulnerabilities
total_exec_time_malware: Malware scan execution time (seconds)
total_api_findings: API scan vulnerabilities
total_exec_time_api: API scan execution time (seconds)
total_iac_findings: IAC scan vulnerabilities
total_exec_time_iac: IAC scan execution time (seconds)
exec_time: Total scan execution time (seconds)
total_findings: Total vulnerabilities found

### Input:
{}

### Response:
"""

input_ques = "how many scans i completed today".lower()

s = time.time()
prompt = alpaca_prompt.format(input_ques)
encodeds = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids

# Run on GPU when available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
inputs = encodeds.to(device)

# Increase max_new_tokens if needed; 50258 is used as both the pad and eos token id
generated_ids = model.generate(
    inputs,
    max_new_tokens=256,
    temperature=0.1,
    top_p=0.90,
    do_sample=True,
    pad_token_id=50258,
    eos_token_id=50258,
    num_return_sequences=1,
)

# Strip the prompt and keep only the text before the <eos> marker
print(tokenizer.decode(generated_ids[0]).replace(prompt, "").split("<eos>")[0])
e = time.time()
print(f"time taken: {e - s}")
```
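For repeated questions, the same steps can be wrapped in a small helper. This is a minimal sketch that reuses the `tokenizer`, `model`, `device`, and `alpaca_prompt` objects defined above; the `generate_sql` name and its defaults are illustrative and not part of the original model card.

```python
def generate_sql(question: str, max_new_tokens: int = 256) -> str:
    """Format a natural-language question and return the generated SQL query."""
    # Hypothetical helper built from the snippet above
    prompt = alpaca_prompt.format(question.lower())
    input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids.to(device)
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        temperature=0.1,
        top_p=0.90,
        do_sample=True,
        pad_token_id=50258,
        eos_token_id=50258,
    )
    # Remove the prompt text and anything after the <eos> marker
    decoded = tokenizer.decode(output_ids[0]).replace(prompt, "")
    return decoded.split("<eos>")[0].strip()

print(generate_sql("how many scans i completed today"))
```

Wrapping generation this way keeps the prompt template in one place and makes the model easier to call from an application.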