#!/usr/bin/env python

"""
    Input a single word, and it will graph it, 
    as embedded by CLIPModel vs CLIPTextModel

    It will then print out the "distance" between the two,
    and then show you a coordinate graph

    You will want to zoom in to actually see the differences, usually

"""


import sys
import json
import torch
from transformers import CLIPProcessor, CLIPModel, CLIPTextModel
import logging
# Turn off noisy warning messages from CLIPModel loading
logging.disable(logging.WARNING)

import PyQt5
import matplotlib
matplotlib.use('Qt5Agg')  # Set the backend to Qt5Agg (requires PyQt5)

import matplotlib.pyplot as plt


clipsrc = "openai/clip-vit-large-patch14"

overlaymodel = "text_encoder.bin"
overlaymodel2 = "text_encoder2.bin"
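# Note: the overlay paths above are presumably alternate text-encoder
# weights; they are defined here but never loaded by this script.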

processor = None
clipmodel = None
cliptextmodel = None

# Fall back to CPU when CUDA is unavailable
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("loading processor from "+clipsrc,file=sys.stderr)
processor = CLIPProcessor.from_pretrained(clipsrc)
print("done",file=sys.stderr)

def clipmodel_one_time(text):
    # Embed text with the full CLIPModel (projected text features)
    global clipmodel
    if clipmodel is None:
        print("loading CLIPModel from " + clipsrc, file=sys.stderr)
        clipmodel = CLIPModel.from_pretrained(clipsrc)
        clipmodel = clipmodel.to(device)
        print("done", file=sys.stderr)

    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        text_features = clipmodel.get_text_features(**inputs)
    return text_features  # shape (1, 768)


def cliptextmodel_one_time(text):
    # Embed text with the standalone CLIPTextModel (unprojected pooled output)
    global cliptextmodel
    if cliptextmodel is None:
        print("loading CLIPTextModel from " + clipsrc, file=sys.stderr)
        cliptextmodel = CLIPTextModel.from_pretrained(clipsrc)
        cliptextmodel = cliptextmodel.to(device)
        print("done", file=sys.stderr)
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        outputs = cliptextmodel(**inputs)
    embeddings = outputs.pooler_output
    return embeddings  # shape (1, 768)
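
def check_projection_equivalence(text):
    # Illustrative helper (a sketch, not called anywhere below): projecting
    # CLIPTextModel's pooled output through CLIPModel.text_projection should
    # reproduce CLIPModel.get_text_features(), since both models here are
    # loaded from the same checkpoint.
    direct = clipmodel_one_time(text)      # loads clipmodel if needed; (1, 768)
    pooled = cliptextmodel_one_time(text)  # unprojected pooled output; (1, 768)
    with torch.no_grad():
        projected = clipmodel.text_projection(pooled)
    print("max |projected - direct|:", (projected - direct).abs().max().item())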

def print_distance(emb1, emb2):
    # L2 (Euclidean) distance between the two embeddings
    targetdistance = torch.norm(emb1 - emb2)
    print("DISTANCE:", targetdistance)



def prompt_for_word():
    fig, ax = plt.subplots()

    text1 = input("Word or prompt: ")
    if text1 == "q":
        sys.exit(0)

    print("generating embeddings for each now")

    emb1 = clipmodel_one_time(text1)[0]
    graph1 = emb1.tolist()
    ax.plot(graph1, label="clipmodel")

    emb2 = cliptextmodel_one_time(text1)[0]
    graph2 = emb2.tolist()
    ax.plot(graph2, label="cliptextmodel")

    print_distance(emb1, emb2)
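    # print_cosine_similarity(emb1, emb2)  # optional: show the cosine-similarity sketch defined above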

    # Add labels, title, and legend
    #ax.set_xlabel('Index')
    ax.set_ylabel('Values')
    ax.set_title('Graph embedding from std libs')
    ax.legend()

    # Display the graph (plt.show() blocks until the window is closed)
    print("Pulling up the graph. To calculate more distances, close the graph")
    plt.show()
    # plt.show() only seems to display a given figure once, which is why a
    # fresh figure is created at the top of each call.

# Main loop: keep prompting for words; enter "q" at the prompt to quit
while True:
    prompt_for_word()