import numpy as np
import argparse
import functools
import os
import pickle
import sys
from datasets import Dataset
import gradio as gr
from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo
from transformers import pipeline
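
# Load the English-to-Spanish translation pipeline from the Hugging Face Hub.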
pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")
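
# Translate the input text and return the first candidate translation.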
def predict(text):
    return pipe(text)[0]["translation_text"]
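
# Query GPU 0 via NVML and return its current memory usage as a string
# (despite the name, this helper returns the string rather than printing it).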
def print_gpu_utilization():
    nvmlInit()
    handle = nvmlDeviceGetHandleByIndex(0)
    info = nvmlDeviceGetMemoryInfo(handle)
    return f"GPU memory occupied: {info.used//1024**2} MB."
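
# Report runtime and throughput from a transformers Trainer result object;
# defined for convenience but not called anywhere in this script.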
def print_summary(result):
    print(f"Time: {result.metrics['train_runtime']:.2f}")
    print(f"Samples/second: {result.metrics['train_samples_per_second']:.2f}")
    print(print_gpu_utilization())  # the helper returns a string, so print it explicitly
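
# Build a dummy dataset: 512 sequences of 512 random token IDs with all-zero
# labels, stored in PyTorch format; it is not used by the Gradio app below.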
seq_len, dataset_size = 512, 512
dummy_data = {
    "input_ids": np.random.randint(100, 30000, (dataset_size, seq_len)),
    "labels": np.random.randint(0, 1, (dataset_size)),
}
ds = Dataset.from_dict(dummy_data)
ds.set_format("pt")
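
# Snapshot the current GPU memory usage as a string.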
result = print_gpu_utilization()
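
# Wire the translation function into a simple text-to-text Gradio interface;
# the GPU usage string is surfaced as an example input.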
iface = gr.Interface(
    fn=predict,
    inputs='text',
    outputs='text',
    examples=[result]
)
iface.launch()