codeShare committed on
Commit 84d4abc · verified · 1 Parent(s): e35abc2

Upload sd_token_similarity_calculator.ipynb

Files changed (1)
  1. sd_token_similarity_calculator.ipynb +48 -21
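In short, this commit moves the list_size, start_at_index, print_* and compact_Output form fields out of the two similarity cells into dedicated "📝 Print the results" and "🖼️ Print the results" cells, and reports similarity as a percentage (round(sim*100,2) instead of round(sim,2)). The following is a minimal sketch, not code from the notebook, of the printing logic those new cells share; the random dots tensor and the token_{id} labels are hypothetical stand-ins for the notebook's pre-calculated text encodings and its get_suffix(id) lookup.

import torch

# Hypothetical stand-in for the prompt-vs-vocabulary cosine similarities
# that the notebook pre-computes before sorting.
dots = torch.rand(1000)
sorted_sims, sorted_ids = torch.sort(dots, dim=0, descending=True)

# Form-field settings that the commit moves into the separate print cell
list_size = 10
start_at_index = 0
print_Similarity = True

_suffixes = '{'
_sims = '{'
for index in range(start_at_index, start_at_index + list_size):
    id = sorted_ids[index].item()
    sim = sorted_sims[index].item()
    _suffixes = _suffixes + f'token_{id}' + '|'        # the notebook uses get_suffix(id) here
    _sims = _sims + f'{round(sim * 100, 2)} %' + '|'   # the commit's fix: scale to a percentage
_suffixes = (_suffixes + '}').replace('|}', '}')
_sims = (_sims + '}').replace('|}', '}')

print(f'The {start_at_index}-{start_at_index + list_size} most similar suffix items : ' + _suffixes)
if print_Similarity:
    print(f'The {start_at_index}-{start_at_index + list_size} similarity % for suffix items : ' + _sims)

Splitting the cells this way presumably lets the slower encoding-and-sorting step run once while the print settings can be tweaked and re-run on their own, as the diff below shows.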
sd_token_similarity_calculator.ipynb CHANGED
@@ -318,13 +318,7 @@
   "source": [
   "# @title 📝 Get Prompt text_encoding similarity to the pre-calc. text_encodings\n",
   "prompt = \" a fast car on the road \" # @param {\"type\":\"string\",\"placeholder\":\"Write a prompt\"}\n",
- "list_size = 100 # @param {type:'number'}\n",
- "start_at_index = 0 # @param {type:'number'}\n",
- "print_Similarity = True # @param {type:\"boolean\"}\n",
- "print_Suffix = True # @param {type:\"boolean\"}\n",
- "print_Prefix = True # @param {type:\"boolean\"}\n",
- "print_Descriptions = True # @param {type:\"boolean\"}\n",
- "compact_Output = False # @param {type:\"boolean\"}\n",
+ "\n",
   "\n",
   "from transformers import AutoTokenizer\n",
   "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
@@ -379,8 +373,26 @@
   "#------#\n",
   "suffix_sorted, suffix_indices = torch.sort(dots,dim=0 , descending=True)\n",
   "#------#\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "xc-PbIYF428y"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title 📝 Print the results\n",
+ "list_size = 100 # @param {type:'number'}\n",
+ "start_at_index = 0 # @param {type:'number'}\n",
+ "print_Similarity = True # @param {type:\"boolean\"}\n",
+ "print_Suffix = True # @param {type:\"boolean\"}\n",
+ "print_Prefix = True # @param {type:\"boolean\"}\n",
+ "print_Descriptions = True # @param {type:\"boolean\"}\n",
+ "compact_Output = False # @param {type:\"boolean\"}\n",
   "\n",
- "#Print the results\n",
   "# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
   "RANGE = list_size\n",
   "_suffixes = '{'\n",
@@ -444,11 +456,10 @@
   " if(compact_Output):\n",
   " print((prefixes + _suffixes).replace('}{', '|'))\n",
   " else:\n",
- " print(prefixes)\n",
- "\n"
+ " print(prefixes)"
   ],
   "metadata": {
- "id": "xc-PbIYF428y"
+ "id": "_vnVbxcFf7WV"
   },
   "execution_count": null,
   "outputs": []
@@ -512,7 +523,7 @@
   "height": 1000
   }
   },
- "execution_count": 4,
+ "execution_count": null,
   "outputs": [
   {
   "output_type": "display_data",
@@ -593,10 +604,26 @@
   " d.close() #close the file\n",
   "#------#\n",
   "suffix_sorted, suffix_indices = torch.sort(dots,dim=0 , descending=True)\n",
- "#------#\n",
- "\n",
+ "#------#"
+ ],
+ "metadata": {
+ "id": "rebogpoyOG8k"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title 🖼️ Print the results\n",
+ "list_size = 100 # @param {type:'number'}\n",
+ "start_at_index = 0 # @param {type:'number'}\n",
+ "print_Similarity = True # @param {type:\"boolean\"}\n",
+ "print_Suffix = True # @param {type:\"boolean\"}\n",
+ "print_Prefix = True # @param {type:\"boolean\"}\n",
+ "print_Descriptions = True # @param {type:\"boolean\"}\n",
+ "compact_Output = False # @param {type:\"boolean\"}\n",
   "\n",
- "#Print the results\n",
   "# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
   "RANGE = list_size\n",
   "_suffixes = '{'\n",
@@ -619,21 +646,21 @@
   " name = ahead + get_suffix(id) + behind\n",
   " if(get_suffix(id) == ' '): name = ahead + f'{id}' + behind\n",
   " _suffixes = _suffixes + name + '|'\n",
- " _sims = _sims + f'{round(sim,2)} %' + '|'\n",
+ " _sims = _sims + f'{round(sim*100,2)} %' + '|'\n",
   "#------#\n",
   "_suffixes = (_suffixes + '}').replace('|}', '}')\n",
   "_sims = (_sims + '}').replace('|}', '}')\n",
   "#------#\n",
   "\n",
+ "\n",
   "suffixes = _suffixes\n",
   "sims = _sims\n",
- "\n",
   "if(not print_Suffix): suffixes = ''\n",
   "if(not print_Similarity): sims = ''\n",
   "\n",
   "if(not compact_Output):\n",
   " if(print_Descriptions):\n",
- " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar suffix items to prompt : ' + suffixes)\n",
+ " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar suffix items to image : ' + suffixes)\n",
   " print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for suffix items : ' + sims)\n",
   " print('')\n",
   " else:\n",
@@ -655,15 +682,15 @@
   "if(not print_Prefix): prefixes = ''\n",
   "\n",
   "if(print_Descriptions):\n",
- " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar prefixes to prompt : ' + prefixes)\n",
+ " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar prefixes to image : ' + prefixes)\n",
   "else:\n",
   " if(compact_Output):\n",
   " print((prefixes + _suffixes).replace('}{', '|'))\n",
   " else:\n",
- " print(prefixes)\n"
+ " print(prefixes)"
   ],
   "metadata": {
- "id": "rebogpoyOG8k"
+ "id": "JkzncP8SgKtS"
   },
   "execution_count": null,
   "outputs": []