index
int64
0
100k
blob_id
stringlengths
40
40
code
stringlengths
7
7.27M
steps
listlengths
1
1.25k
error
bool
2 classes
300
5ddfeb49c16a7452c99126f1a837f3c0bed0ec10
import requests from urllib.parse import urlparse from bs4 import BeautifulSoup import re import datetime import random pages = set() # Retrieve a list of all Internal links foound on a page. def getInternalLinks(bs, includeUrl): includeUrl = f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}' internalLinks = [] # Finds all links thhat begin with a "/" for link in bs.find_all('a', href=re.compile('^(/|.*'+includeUrl+')')): if link.attrs['href'] is not None: if link.attrs['href'] not in internalLinks: if link.attrs['href'].startswith('/'): internalLinks.append(includeUrl+link.attrs['href']) else: internalLinks.append(link.attrs['href']) return internalLinks # Retrieves a list of all external links found on a pagee. def getExternalLinks(bs, excludeUrl): externalLinks = [] # Finds all links that starts with "http" that do # not contain the current URL for link in bs.find_all('a', href=re.compile('^(http|www)((?!'+excludeUrl+').)*$')): if link.attrs['href'] is not None: if link.attrs['href'] not in externalLinks: externalLinks.append(link.attrs['href']) return externalLinks def getRandomExternalLink(startingPage): html = requests.get(startingPage) bs = BeautifulSoup(html.text, 'html.parser') externalLinks = getExternalLinks(bs, urlparse(startingPage).netloc) if len(externalLinks) == 0: print('No external links, looking around the site for one.') domain = f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}' internalLinks = getInternalLinks(bs, domain) return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks)-1)]) else: return externalLinks[random.randint(0, len(externalLinks)-1)] # Collects a list of all external URLs found on the site allExtLinks = set() allIntLinks = set() def getAllExternalLinks(siteUrl): html = requests.get(siteUrl) domain = f"{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}" bs = BeautifulSoup(html.text, 'html.parser') internalLinks = getInternalLinks(bs, domain) externalLinks = getExternalLinks(bs, domain) for link in externalLinks: if link not in allExtLinks: allExtLinks.add(link) print(link) for link in internalLinks: if link not in allIntLinks: allIntLinks.add(link) getAllExternalLinks(link) def followExternalOnly(startingSite): externalLink = getRandomExternalLink(startingSite) print(f"Random external link is: {externalLink}") followExternalOnly(externalLink)
[ "import requests\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\nimport random\n\npages = set()\n\n# Retrieve a list of all Internal links foound on a page.\ndef getInternalLinks(bs, includeUrl):\n includeUrl = f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}'\n internalLinks = []\n # Finds all links thhat begin with a \"/\"\n for link in bs.find_all('a',\n href=re.compile('^(/|.*'+includeUrl+')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl+link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n# Retrieves a list of all external links found on a pagee.\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n # Finds all links that starts with \"http\" that do\n # not contain the current URL\n for link in bs.find_all('a',\n href=re.compile('^(http|www)((?!'+excludeUrl+').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\ndef getRandomExternalLink(startingPage):\n html = requests.get(startingPage)\n bs = BeautifulSoup(html.text, 'html.parser')\n externalLinks = getExternalLinks(bs, \n urlparse(startingPage).netloc)\n if len(externalLinks) == 0:\n print('No external links, looking around the site for one.')\n domain = f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'\n internalLinks = getInternalLinks(bs, domain)\n return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks)-1)])\n else:\n return externalLinks[random.randint(0, len(externalLinks)-1)]\n\n# Collects a list of all external URLs found on the site\nallExtLinks = set()\nallIntLinks = set()\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f\"{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}\"\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f\"Random external link is: {externalLink}\")\n followExternalOnly(externalLink)\n\n\n", "import requests\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\nimport random\npages = set()\n\n\ndef getInternalLinks(bs, includeUrl):\n includeUrl = (\n f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')\n internalLinks = []\n for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl + link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\ndef getRandomExternalLink(startingPage):\n html = requests.get(startingPage)\n bs = BeautifulSoup(html.text, 'html.parser')\n externalLinks = getExternalLinks(bs, urlparse(startingPage).netloc)\n if len(externalLinks) == 0:\n print('No external links, looking around the site for one.')\n domain = (\n f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'\n )\n internalLinks = getInternalLinks(bs, domain)\n return getRandomExternalLink(internalLinks[random.randint(0, len(\n internalLinks) - 1)])\n else:\n return externalLinks[random.randint(0, len(externalLinks) - 1)]\n\n\nallExtLinks = set()\nallIntLinks = set()\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f'Random external link is: {externalLink}')\n followExternalOnly(externalLink)\n", "<import token>\npages = set()\n\n\ndef getInternalLinks(bs, includeUrl):\n includeUrl = (\n f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')\n internalLinks = []\n for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl + link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\ndef getRandomExternalLink(startingPage):\n html = requests.get(startingPage)\n bs = BeautifulSoup(html.text, 'html.parser')\n externalLinks = getExternalLinks(bs, urlparse(startingPage).netloc)\n if len(externalLinks) == 0:\n print('No external links, looking around the site for one.')\n domain = (\n f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'\n )\n internalLinks = getInternalLinks(bs, domain)\n return getRandomExternalLink(internalLinks[random.randint(0, len(\n internalLinks) - 1)])\n else:\n return externalLinks[random.randint(0, len(externalLinks) - 1)]\n\n\nallExtLinks = set()\nallIntLinks = set()\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f'Random external link is: {externalLink}')\n followExternalOnly(externalLink)\n", "<import token>\n<assignment token>\n\n\ndef getInternalLinks(bs, includeUrl):\n includeUrl = (\n f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')\n internalLinks = []\n for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl + link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\ndef getRandomExternalLink(startingPage):\n html = requests.get(startingPage)\n bs = BeautifulSoup(html.text, 'html.parser')\n externalLinks = getExternalLinks(bs, urlparse(startingPage).netloc)\n if len(externalLinks) == 0:\n print('No external links, looking around the site for one.')\n domain = (\n f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'\n )\n internalLinks = getInternalLinks(bs, domain)\n return getRandomExternalLink(internalLinks[random.randint(0, len(\n internalLinks) - 1)])\n else:\n return externalLinks[random.randint(0, len(externalLinks) - 1)]\n\n\n<assignment token>\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f'Random external link is: {externalLink}')\n followExternalOnly(externalLink)\n", "<import token>\n<assignment token>\n\n\ndef getInternalLinks(bs, includeUrl):\n includeUrl = (\n f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')\n internalLinks = []\n for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl + link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\n<function token>\n<assignment token>\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f'Random external link is: {externalLink}')\n followExternalOnly(externalLink)\n", "<import token>\n<assignment token>\n\n\ndef getInternalLinks(bs, includeUrl):\n includeUrl = (\n f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')\n internalLinks = []\n for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl + link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\n<function token>\n<assignment token>\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\n<function token>\n", "<import token>\n<assignment token>\n\n\ndef getInternalLinks(bs, includeUrl):\n includeUrl = (\n f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')\n internalLinks = []\n for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl + link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n\n<function token>\n<function token>\n<assignment token>\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\n<function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n" ]
false
301
1e1f918ba24f5a5f13b9b01289ebfda65bae572d
def warshall_floyd(N): INF = 10**20 path = [[INF for _ in range(N+ 1)] for _ in range(N+1)] graph = get_graph() for i in range(N+1): path[i][i] = 0 for g in graph: x = g[0] y = g[1] l = g[2] path[x][y] = path[y][x] = l for start in range(N+1): for goal in range(N+1): for way in range(N+1): path[start][goal] = path[goal][start] = min(path[start][goal], path[start][way] + path[way][goal]) return path def get_graph(): graph = [input_as_int() for _ in range(M)] return graph def input_as_int(): return list(map(int,input().split())) R,C,K = input_as_int() N = int(input()) print(ans)
[ "def warshall_floyd(N):\n INF = 10**20\n path = [[INF for _ in range(N+ 1)] for _ in range(N+1)]\n graph = get_graph()\n for i in range(N+1):\n path[i][i] = 0\n for g in graph:\n x = g[0]\n y = g[1]\n l = g[2]\n path[x][y] = path[y][x] = l\n for start in range(N+1):\n for goal in range(N+1):\n for way in range(N+1):\n path[start][goal] = path[goal][start] = min(path[start][goal], path[start][way] + path[way][goal])\n return path\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\ndef input_as_int():\n return list(map(int,input().split()))\n\nR,C,K = input_as_int()\nN = int(input())\n\n\n\nprint(ans)", "def warshall_floyd(N):\n INF = 10 ** 20\n path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]\n graph = get_graph()\n for i in range(N + 1):\n path[i][i] = 0\n for g in graph:\n x = g[0]\n y = g[1]\n l = g[2]\n path[x][y] = path[y][x] = l\n for start in range(N + 1):\n for goal in range(N + 1):\n for way in range(N + 1):\n path[start][goal] = path[goal][start] = min(path[start][\n goal], path[start][way] + path[way][goal])\n return path\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\ndef input_as_int():\n return list(map(int, input().split()))\n\n\nR, C, K = input_as_int()\nN = int(input())\nprint(ans)\n", "def warshall_floyd(N):\n INF = 10 ** 20\n path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]\n graph = get_graph()\n for i in range(N + 1):\n path[i][i] = 0\n for g in graph:\n x = g[0]\n y = g[1]\n l = g[2]\n path[x][y] = path[y][x] = l\n for start in range(N + 1):\n for goal in range(N + 1):\n for way in range(N + 1):\n path[start][goal] = path[goal][start] = min(path[start][\n goal], path[start][way] + path[way][goal])\n return path\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\ndef input_as_int():\n return list(map(int, input().split()))\n\n\n<assignment token>\nprint(ans)\n", "def warshall_floyd(N):\n INF = 10 ** 20\n path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]\n graph = get_graph()\n for i in range(N + 1):\n path[i][i] = 0\n for g in graph:\n x = g[0]\n y = g[1]\n l = g[2]\n path[x][y] = path[y][x] = l\n for start in range(N + 1):\n for goal in range(N + 1):\n for way in range(N + 1):\n path[start][goal] = path[goal][start] = min(path[start][\n goal], path[start][way] + path[way][goal])\n return path\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\ndef input_as_int():\n return list(map(int, input().split()))\n\n\n<assignment token>\n<code token>\n", "<function token>\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\ndef input_as_int():\n return list(map(int, input().split()))\n\n\n<assignment token>\n<code token>\n", "<function token>\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\n<function token>\n<assignment token>\n<code token>\n", "<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n" ]
false
302
04538cc5c9c68582cc9aa2959faae2d7547ab2ee
try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET import data_helpers def write_to_file(file,line): file.write(line+"\n") def cat_map(): catmap={} id=1 f=open("cat") cat=set([s.strip() for s in list(f.readlines())]) for i in cat: catmap[i]=id id=id+1 return catmap tree = ET.ElementTree(file="test.xml") root = tree.getroot() cnn=open("cnn","a") lstm=open("lstm","a") cat=open("cat","a") for vespaadd in root: document = vespaadd.find("document") if(document!=None): subject = document.find("subject") content = document.find("content") maincat = document.find("maincat") if(subject==None): continue if(content==None): content=subject if(maincat==None): continue write_to_file(cnn,data_helpers.clean_str(subject.text)) write_to_file(lstm,data_helpers.clean_str(content.text)) write_to_file(cat,data_helpers.clean_str(maincat.text)) cnn.close() lstm.close() cat.close()
[ "try:\n\timport xml.etree.cElementTree as ET\nexcept ImportError:\n\timport xml.etree.ElementTree as ET\nimport data_helpers\n\ndef write_to_file(file,line):\n\tfile.write(line+\"\\n\")\n\ndef cat_map():\n\tcatmap={}\n\tid=1\n\tf=open(\"cat\")\n\tcat=set([s.strip() for s in list(f.readlines())])\n\tfor i in cat:\n\t\tcatmap[i]=id\n\t\tid=id+1\n\treturn catmap\n\ntree = ET.ElementTree(file=\"test.xml\")\nroot = tree.getroot()\ncnn=open(\"cnn\",\"a\")\nlstm=open(\"lstm\",\"a\")\ncat=open(\"cat\",\"a\")\nfor vespaadd in root:\n\tdocument = vespaadd.find(\"document\")\n\tif(document!=None):\n\t\tsubject = document.find(\"subject\")\n\t\tcontent = document.find(\"content\")\n\t\tmaincat = document.find(\"maincat\")\n\t\tif(subject==None):\n\t\t\tcontinue\n\t\tif(content==None):\n\t\t\tcontent=subject\n\t\tif(maincat==None):\n\t\t\tcontinue\n\t\twrite_to_file(cnn,data_helpers.clean_str(subject.text))\n\t\twrite_to_file(lstm,data_helpers.clean_str(content.text))\n\t\twrite_to_file(cat,data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()", "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\nimport data_helpers\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\ntree = ET.ElementTree(file='test.xml')\nroot = tree.getroot()\ncnn = open('cnn', 'a')\nlstm = open('lstm', 'a')\ncat = open('cat', 'a')\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n", "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n<import token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\ntree = ET.ElementTree(file='test.xml')\nroot = tree.getroot()\ncnn = open('cnn', 'a')\nlstm = open('lstm', 'a')\ncat = open('cat', 'a')\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n", "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n<import token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\n<assignment token>\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n", "<code token>\n<import token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\n<assignment token>\n<code token>\n", "<code token>\n<import token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\n<function token>\n<assignment token>\n<code token>\n", "<code token>\n<import token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n" ]
false
303
631323e79f4fb32611d7094af92cff8f923fa996
#!/bin/python3 def word_ladder(start_word, end_word, dictionary_file='words5.dict'): ''' Returns a list satisfying the following properties: 1. the first element is `start_word` 2. the last element is `end_word` 3. elements at index i and i+1 are `_adjacent` 4. all elements are entries in the `dictionary_file` file For example, running the command ``` word_ladder('stone','money') ``` may give the output ``` ['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money'] ``` but the possible outputs are not unique, so you may also get the output ``` ['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money'] ``` (We cannot use doctests here because the outputs are not unique.) Whenever it is impossible to generate a word ladder between the two words, the function returns `None`. HINT: See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases. ''' def verify_word_ladder(ladder): ''' Returns True if each entry of the input list is adjacent to its neighbors; otherwise returns False. >>> verify_word_ladder(['stone', 'shone', 'phone', 'phony']) True >>> verify_word_ladder(['stone', 'shone', 'phony']) False ''' def _adjacent(word1, word2): ''' Returns True if the input words differ by only a single character; returns False otherwise. >>> _adjacent('phone','phony') True >>> _adjacent('stone','money') False '''
[ "#!/bin/python3\n\n\ndef word_ladder(start_word, end_word, dictionary_file='words5.dict'):\n '''\n Returns a list satisfying the following properties:\n\n 1. the first element is `start_word`\n 2. the last element is `end_word`\n 3. elements at index i and i+1 are `_adjacent`\n 4. all elements are entries in the `dictionary_file` file\n\n For example, running the command\n ```\n word_ladder('stone','money')\n ```\n may give the output\n ```\n ['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']\n ```\n but the possible outputs are not unique,\n so you may also get the output\n ```\n ['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']\n ```\n (We cannot use doctests here because the outputs are not unique.)\n\n Whenever it is impossible to generate a word ladder between the two words,\n the function returns `None`.\n\n HINT:\n See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.\n '''\n\n\ndef verify_word_ladder(ladder):\n '''\n Returns True if each entry of the input list is adjacent to its neighbors;\n otherwise returns False.\n\n >>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])\n True\n >>> verify_word_ladder(['stone', 'shone', 'phony'])\n False\n '''\n\n\ndef _adjacent(word1, word2):\n '''\n Returns True if the input words differ by only a single character;\n returns False otherwise.\n\n >>> _adjacent('phone','phony')\n True\n >>> _adjacent('stone','money')\n False\n '''\n", "def word_ladder(start_word, end_word, dictionary_file='words5.dict'):\n \"\"\"\n Returns a list satisfying the following properties:\n\n 1. the first element is `start_word`\n 2. the last element is `end_word`\n 3. elements at index i and i+1 are `_adjacent`\n 4. all elements are entries in the `dictionary_file` file\n\n For example, running the command\n ```\n word_ladder('stone','money')\n ```\n may give the output\n ```\n ['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']\n ```\n but the possible outputs are not unique,\n so you may also get the output\n ```\n ['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']\n ```\n (We cannot use doctests here because the outputs are not unique.)\n\n Whenever it is impossible to generate a word ladder between the two words,\n the function returns `None`.\n\n HINT:\n See <https://github.com/mikeizbicki/cmc-csci046/issues/472> for a discussion about a common memory management bug that causes the generated word ladders to be too long in some cases.\n \"\"\"\n\n\ndef verify_word_ladder(ladder):\n \"\"\"\n Returns True if each entry of the input list is adjacent to its neighbors;\n otherwise returns False.\n\n >>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])\n True\n >>> verify_word_ladder(['stone', 'shone', 'phony'])\n False\n \"\"\"\n\n\ndef _adjacent(word1, word2):\n \"\"\"\n Returns True if the input words differ by only a single character;\n returns False otherwise.\n\n >>> _adjacent('phone','phony')\n True\n >>> _adjacent('stone','money')\n False\n \"\"\"\n", "<function token>\n\n\ndef verify_word_ladder(ladder):\n \"\"\"\n Returns True if each entry of the input list is adjacent to its neighbors;\n otherwise returns False.\n\n >>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])\n True\n >>> verify_word_ladder(['stone', 'shone', 'phony'])\n False\n \"\"\"\n\n\ndef _adjacent(word1, word2):\n \"\"\"\n Returns True if the input words differ by only a single character;\n returns False otherwise.\n\n >>> _adjacent('phone','phony')\n True\n >>> _adjacent('stone','money')\n False\n \"\"\"\n", "<function token>\n\n\ndef verify_word_ladder(ladder):\n \"\"\"\n Returns True if each entry of the input list is adjacent to its neighbors;\n otherwise returns False.\n\n >>> verify_word_ladder(['stone', 'shone', 'phone', 'phony'])\n True\n >>> verify_word_ladder(['stone', 'shone', 'phony'])\n False\n \"\"\"\n\n\n<function token>\n", "<function token>\n<function token>\n<function token>\n" ]
false
304
0a528fb7fe4a318af8bd3111e8d67f6af6bd7416
from typing import Tuple class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None class Solution: def lcaDeepestLeaves(self, root: TreeNode) -> TreeNode: _, lca = self.get_lca(root, 0) return lca def get_lca(self, node: TreeNode, depth: int) -> Tuple[int, TreeNode]: if not node: return depth, node left_depth, left_lca = self.get_lca(node.left, depth+1) right_depth, right_lca = self.get_lca(node.right, depth+1) if left_depth == right_depth: return left_depth, node if left_depth > right_depth: return left_depth, left_lca return right_depth, right_lca
[ "from typing import Tuple\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def lcaDeepestLeaves(self, root: TreeNode) -> TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) -> Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth+1)\n right_depth, right_lca = self.get_lca(node.right, depth+1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n", "from typing import Tuple\n\n\nclass TreeNode:\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n", "<import token>\n\n\nclass TreeNode:\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n", "<import token>\n\n\nclass TreeNode:\n <function token>\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n", "<import token>\n<class token>\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n\n def get_lca(self, node: TreeNode, depth: int) ->Tuple[int, TreeNode]:\n if not node:\n return depth, node\n left_depth, left_lca = self.get_lca(node.left, depth + 1)\n right_depth, right_lca = self.get_lca(node.right, depth + 1)\n if left_depth == right_depth:\n return left_depth, node\n if left_depth > right_depth:\n return left_depth, left_lca\n return right_depth, right_lca\n", "<import token>\n<class token>\n\n\nclass Solution:\n\n def lcaDeepestLeaves(self, root: TreeNode) ->TreeNode:\n _, lca = self.get_lca(root, 0)\n return lca\n <function token>\n", "<import token>\n<class token>\n\n\nclass Solution:\n <function token>\n <function token>\n", "<import token>\n<class token>\n<class token>\n" ]
false
305
371762a6e3f8b8ed14742a70a709da224ae6712b
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import random """ 检查图数据和结构或者链表数据结构中是否存在环 https://www.youtube.com/watch?v=YKE4Vd1ysPI """ def get_all_edge(graph): result = [] for k, v in graph.items(): for i in v: if sorted((k, i)) not in result: result.append(sorted((k, i))) return result def bfs(graph, start): result = [] queue = [] seen = set() queue.append(start) seen.add(start) while len(queue): vertex = queue.pop(0) nodes = graph[vertex] for node in nodes: if node not in seen: queue.append(node) seen.add(node) result.append(vertex) return result if __name__ == '__main__': graph = { '0': ['1'], '1': ['0', '2', '3'], '2': ['1', '4', '5'], '3': ['1', '4'], '4': ['2', '3'], '5': ['2'], } all_edge = get_all_edge(graph) random.shuffle(all_edge) print(all_edge) print(bfs(graph, '0')) # ['0', '1', '2', '3', '4', '5'] """ ['0', '1', '2', '3', '4', '5'] [['1', '3'], ['3', '4'], ['2', '5'], ['1', '2'], ['0', '1'], ['2', '4']] ['1', '3'] disjoint set 1, 3 ['3', '4'] -> 一个点在集合中,另一个不在集合中 disjoint set 1, 3, 4 ['2', '5'] -> 两个点都不在同一集合中 disjoint set 1 1, 3, 4 disjoint set 2 2, 5 ['1', '2'] -> 两个点分别在不同的集合中,合并集合 disjoint set 1, 3, 4, 2, 5 ['0', '1'] -> 一个点在集合中,另一个不在集合中 disjoint set 1, 3, 4, 2, 5, 0 ['2', '4'] -> 两个点都在同一个集合中,说明有环 """ graph = { '0': ['1'], '1': ['0', '2', '3'], '2': ['1', '5'], '3': ['1', '4'], '4': ['3'], '5': ['2'], } all_edge = get_all_edge(graph) random.shuffle(all_edge) print(all_edge) print(bfs(graph, '0')) # ['0', '1', '2', '3', '4', '5'] """ [['3', '4'], ['1', '3'], ['0', '1'], ['1', '2'], ['2', '5']] ['0', '1', '2', '3', '5', '4'] ['3', '4'] disjoint set 3, 4 ['1', '3'] disjoint set 3, 4, 1 ['0', '1'] disjoint set 3, 4, 1, 0 ['1', '2'] disjoint set 3, 4, 1, 0, 2 ['2', '5'] disjoint set 3, 4, 1, 0, 2, 5 图中无环 """ graph = { '0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2'] } all_edge = get_all_edge(graph) random.shuffle(all_edge) print(all_edge) print(bfs(graph, '0')) """ [['2', '3'], ['0', '1'], ['1', '2']] ['0', '1', '2', '3'] ['2', '3'] disjoint set 2, 3 ['0', '1'] disjoint set1 2, 3 disjoint set2 0, 1 ['1', '2'] disjoint set 0, 1, 2, 3 链表中无环 """ graph = { '0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2', '4', '6'], '4': ['3', '5'], '5': ['4', '6'], '6': ['3', '5'] } all_edge = get_all_edge(graph) random.shuffle(all_edge) print(all_edge) print(bfs(graph, '0')) """ [['2', '3'], ['5', '6'], ['3', '4'], ['0', '1'], ['1', '2'], ['3', '6'], ['4', '5']] ['0', '1', '2', '3', '4', '6', '5'] ['2', '3'] disjoint set 2, 3 ['5', '6'] disjoint set1 2, 3 disjoint set2 5, 6 ['3', '4'] disjoint set1 2, 3, 4 disjoint set2 5, 6 ['0', '1'] disjoint set1 2, 3, 4 disjoint set2 5, 6 disjoint set3 0, 1 ['1', '2'] disjoint set1 2, 3, 4, 1, 0 disjoint set2 5, 6 ['3', '6'] disjoint set 2, 3, 4, 1, 0, 5, 6 ['4', '5'] 链表中有环 """
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport random\n\n\"\"\"\n检查图数据和结构或者链表数据结构中是否存在环\n\nhttps://www.youtube.com/watch?v=YKE4Vd1ysPI\n\n\"\"\"\n\n\ndef get_all_edge(graph):\n result = []\n for k, v in graph.items():\n for i in v:\n if sorted((k, i)) not in result:\n result.append(sorted((k, i)))\n return result\n\n\ndef bfs(graph, start):\n result = []\n queue = []\n seen = set()\n queue.append(start)\n seen.add(start)\n while len(queue):\n vertex = queue.pop(0)\n nodes = graph[vertex]\n for node in nodes:\n if node not in seen:\n queue.append(node)\n seen.add(node)\n result.append(vertex)\n return result\n\n\nif __name__ == '__main__':\n graph = {\n '0': ['1'],\n '1': ['0', '2', '3'],\n '2': ['1', '4', '5'],\n '3': ['1', '4'],\n '4': ['2', '3'],\n '5': ['2'],\n }\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0')) # ['0', '1', '2', '3', '4', '5']\n \"\"\"\n ['0', '1', '2', '3', '4', '5']\n [['1', '3'], ['3', '4'], ['2', '5'], ['1', '2'], ['0', '1'], ['2', '4']]\n \n ['1', '3']\n disjoint set\n 1, 3\n \n ['3', '4'] -> 一个点在集合中,另一个不在集合中\n disjoint set\n 1, 3, 4\n \n ['2', '5'] -> 两个点都不在同一集合中\n disjoint set 1\n 1, 3, 4\n disjoint set 2\n 2, 5\n \n ['1', '2'] -> 两个点分别在不同的集合中,合并集合\n disjoint set\n 1, 3, 4, 2, 5\n \n ['0', '1'] -> 一个点在集合中,另一个不在集合中\n disjoint set\n 1, 3, 4, 2, 5, 0\n \n ['2', '4'] -> 两个点都在同一个集合中,说明有环\n \n \"\"\"\n\n graph = {\n '0': ['1'],\n '1': ['0', '2', '3'],\n '2': ['1', '5'],\n '3': ['1', '4'],\n '4': ['3'],\n '5': ['2'],\n }\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0')) # ['0', '1', '2', '3', '4', '5']\n \"\"\"\n [['3', '4'], ['1', '3'], ['0', '1'], ['1', '2'], ['2', '5']]\n ['0', '1', '2', '3', '5', '4']\n \n ['3', '4']\n disjoint set\n 3, 4\n \n ['1', '3']\n disjoint set\n 3, 4, 1\n \n ['0', '1']\n disjoint set\n 3, 4, 1, 0\n \n ['1', '2']\n disjoint set\n 3, 4, 1, 0, 2\n \n ['2', '5']\n disjoint set\n 3, 4, 1, 0, 2, 5\n \n 图中无环\n \n \"\"\"\n\n graph = {\n '0': ['1'],\n '1': ['0', '2'],\n '2': ['1', '3'],\n '3': ['2']\n }\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n [['2', '3'], ['0', '1'], ['1', '2']]\n ['0', '1', '2', '3']\n \n ['2', '3']\n disjoint set\n 2, 3\n \n ['0', '1']\n disjoint set1\n 2, 3\n disjoint set2\n 0, 1\n \n ['1', '2']\n disjoint set\n 0, 1, 2, 3\n \n 链表中无环\n\n \"\"\"\n\n graph = {\n '0': ['1'],\n '1': ['0', '2'],\n '2': ['1', '3'],\n '3': ['2', '4', '6'],\n '4': ['3', '5'],\n '5': ['4', '6'],\n '6': ['3', '5']\n }\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n [['2', '3'], ['5', '6'], ['3', '4'], ['0', '1'], ['1', '2'], ['3', '6'], ['4', '5']]\n ['0', '1', '2', '3', '4', '6', '5']\n \n ['2', '3']\n disjoint set\n 2, 3\n \n ['5', '6']\n disjoint set1\n 2, 3\n disjoint set2\n 5, 6\n \n ['3', '4']\n disjoint set1\n 2, 3, 4\n disjoint set2\n 5, 6\n \n ['0', '1']\n disjoint set1\n 2, 3, 4\n disjoint set2\n 5, 6\n disjoint set3\n 0, 1\n \n ['1', '2']\n disjoint set1\n 2, 3, 4, 1, 0\n disjoint set2\n 5, 6\n \n ['3', '6']\n disjoint set\n 2, 3, 4, 1, 0, 5, 6\n \n ['4', '5'] 链表中有环\n\n \"\"\"\n", "import random\n<docstring token>\n\n\ndef get_all_edge(graph):\n result = []\n for k, v in graph.items():\n for i in v:\n if sorted((k, i)) not in result:\n result.append(sorted((k, i)))\n return result\n\n\ndef bfs(graph, start):\n result = []\n queue = []\n seen = set()\n queue.append(start)\n seen.add(start)\n while len(queue):\n vertex = queue.pop(0)\n nodes = graph[vertex]\n for node in nodes:\n if node not in seen:\n queue.append(node)\n seen.add(node)\n result.append(vertex)\n return result\n\n\nif __name__ == '__main__':\n graph = {'0': ['1'], '1': ['0', '2', '3'], '2': ['1', '4', '5'], '3': [\n '1', '4'], '4': ['2', '3'], '5': ['2']}\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n ['0', '1', '2', '3', '4', '5']\n [['1', '3'], ['3', '4'], ['2', '5'], ['1', '2'], ['0', '1'], ['2', '4']]\n \n ['1', '3']\n disjoint set\n 1, 3\n \n ['3', '4'] -> 一个点在集合中,另一个不在集合中\n disjoint set\n 1, 3, 4\n \n ['2', '5'] -> 两个点都不在同一集合中\n disjoint set 1\n 1, 3, 4\n disjoint set 2\n 2, 5\n \n ['1', '2'] -> 两个点分别在不同的集合中,合并集合\n disjoint set\n 1, 3, 4, 2, 5\n \n ['0', '1'] -> 一个点在集合中,另一个不在集合中\n disjoint set\n 1, 3, 4, 2, 5, 0\n \n ['2', '4'] -> 两个点都在同一个集合中,说明有环\n \n \"\"\"\n graph = {'0': ['1'], '1': ['0', '2', '3'], '2': ['1', '5'], '3': ['1',\n '4'], '4': ['3'], '5': ['2']}\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n [['3', '4'], ['1', '3'], ['0', '1'], ['1', '2'], ['2', '5']]\n ['0', '1', '2', '3', '5', '4']\n \n ['3', '4']\n disjoint set\n 3, 4\n \n ['1', '3']\n disjoint set\n 3, 4, 1\n \n ['0', '1']\n disjoint set\n 3, 4, 1, 0\n \n ['1', '2']\n disjoint set\n 3, 4, 1, 0, 2\n \n ['2', '5']\n disjoint set\n 3, 4, 1, 0, 2, 5\n \n 图中无环\n \n \"\"\"\n graph = {'0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2']}\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n [['2', '3'], ['0', '1'], ['1', '2']]\n ['0', '1', '2', '3']\n \n ['2', '3']\n disjoint set\n 2, 3\n \n ['0', '1']\n disjoint set1\n 2, 3\n disjoint set2\n 0, 1\n \n ['1', '2']\n disjoint set\n 0, 1, 2, 3\n \n 链表中无环\n\n \"\"\"\n graph = {'0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2', '4',\n '6'], '4': ['3', '5'], '5': ['4', '6'], '6': ['3', '5']}\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n [['2', '3'], ['5', '6'], ['3', '4'], ['0', '1'], ['1', '2'], ['3', '6'], ['4', '5']]\n ['0', '1', '2', '3', '4', '6', '5']\n \n ['2', '3']\n disjoint set\n 2, 3\n \n ['5', '6']\n disjoint set1\n 2, 3\n disjoint set2\n 5, 6\n \n ['3', '4']\n disjoint set1\n 2, 3, 4\n disjoint set2\n 5, 6\n \n ['0', '1']\n disjoint set1\n 2, 3, 4\n disjoint set2\n 5, 6\n disjoint set3\n 0, 1\n \n ['1', '2']\n disjoint set1\n 2, 3, 4, 1, 0\n disjoint set2\n 5, 6\n \n ['3', '6']\n disjoint set\n 2, 3, 4, 1, 0, 5, 6\n \n ['4', '5'] 链表中有环\n\n \"\"\"\n", "<import token>\n<docstring token>\n\n\ndef get_all_edge(graph):\n result = []\n for k, v in graph.items():\n for i in v:\n if sorted((k, i)) not in result:\n result.append(sorted((k, i)))\n return result\n\n\ndef bfs(graph, start):\n result = []\n queue = []\n seen = set()\n queue.append(start)\n seen.add(start)\n while len(queue):\n vertex = queue.pop(0)\n nodes = graph[vertex]\n for node in nodes:\n if node not in seen:\n queue.append(node)\n seen.add(node)\n result.append(vertex)\n return result\n\n\nif __name__ == '__main__':\n graph = {'0': ['1'], '1': ['0', '2', '3'], '2': ['1', '4', '5'], '3': [\n '1', '4'], '4': ['2', '3'], '5': ['2']}\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n ['0', '1', '2', '3', '4', '5']\n [['1', '3'], ['3', '4'], ['2', '5'], ['1', '2'], ['0', '1'], ['2', '4']]\n \n ['1', '3']\n disjoint set\n 1, 3\n \n ['3', '4'] -> 一个点在集合中,另一个不在集合中\n disjoint set\n 1, 3, 4\n \n ['2', '5'] -> 两个点都不在同一集合中\n disjoint set 1\n 1, 3, 4\n disjoint set 2\n 2, 5\n \n ['1', '2'] -> 两个点分别在不同的集合中,合并集合\n disjoint set\n 1, 3, 4, 2, 5\n \n ['0', '1'] -> 一个点在集合中,另一个不在集合中\n disjoint set\n 1, 3, 4, 2, 5, 0\n \n ['2', '4'] -> 两个点都在同一个集合中,说明有环\n \n \"\"\"\n graph = {'0': ['1'], '1': ['0', '2', '3'], '2': ['1', '5'], '3': ['1',\n '4'], '4': ['3'], '5': ['2']}\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n [['3', '4'], ['1', '3'], ['0', '1'], ['1', '2'], ['2', '5']]\n ['0', '1', '2', '3', '5', '4']\n \n ['3', '4']\n disjoint set\n 3, 4\n \n ['1', '3']\n disjoint set\n 3, 4, 1\n \n ['0', '1']\n disjoint set\n 3, 4, 1, 0\n \n ['1', '2']\n disjoint set\n 3, 4, 1, 0, 2\n \n ['2', '5']\n disjoint set\n 3, 4, 1, 0, 2, 5\n \n 图中无环\n \n \"\"\"\n graph = {'0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2']}\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n [['2', '3'], ['0', '1'], ['1', '2']]\n ['0', '1', '2', '3']\n \n ['2', '3']\n disjoint set\n 2, 3\n \n ['0', '1']\n disjoint set1\n 2, 3\n disjoint set2\n 0, 1\n \n ['1', '2']\n disjoint set\n 0, 1, 2, 3\n \n 链表中无环\n\n \"\"\"\n graph = {'0': ['1'], '1': ['0', '2'], '2': ['1', '3'], '3': ['2', '4',\n '6'], '4': ['3', '5'], '5': ['4', '6'], '6': ['3', '5']}\n all_edge = get_all_edge(graph)\n random.shuffle(all_edge)\n print(all_edge)\n print(bfs(graph, '0'))\n \"\"\"\n [['2', '3'], ['5', '6'], ['3', '4'], ['0', '1'], ['1', '2'], ['3', '6'], ['4', '5']]\n ['0', '1', '2', '3', '4', '6', '5']\n \n ['2', '3']\n disjoint set\n 2, 3\n \n ['5', '6']\n disjoint set1\n 2, 3\n disjoint set2\n 5, 6\n \n ['3', '4']\n disjoint set1\n 2, 3, 4\n disjoint set2\n 5, 6\n \n ['0', '1']\n disjoint set1\n 2, 3, 4\n disjoint set2\n 5, 6\n disjoint set3\n 0, 1\n \n ['1', '2']\n disjoint set1\n 2, 3, 4, 1, 0\n disjoint set2\n 5, 6\n \n ['3', '6']\n disjoint set\n 2, 3, 4, 1, 0, 5, 6\n \n ['4', '5'] 链表中有环\n\n \"\"\"\n", "<import token>\n<docstring token>\n\n\ndef get_all_edge(graph):\n result = []\n for k, v in graph.items():\n for i in v:\n if sorted((k, i)) not in result:\n result.append(sorted((k, i)))\n return result\n\n\ndef bfs(graph, start):\n result = []\n queue = []\n seen = set()\n queue.append(start)\n seen.add(start)\n while len(queue):\n vertex = queue.pop(0)\n nodes = graph[vertex]\n for node in nodes:\n if node not in seen:\n queue.append(node)\n seen.add(node)\n result.append(vertex)\n return result\n\n\n<code token>\n", "<import token>\n<docstring token>\n\n\ndef get_all_edge(graph):\n result = []\n for k, v in graph.items():\n for i in v:\n if sorted((k, i)) not in result:\n result.append(sorted((k, i)))\n return result\n\n\n<function token>\n<code token>\n", "<import token>\n<docstring token>\n<function token>\n<function token>\n<code token>\n" ]
false
306
7f7ebc6d3d69fbb19071c63a9ab235ad01f1d414
import sys sys.path.append("..") import helpers helpers.mask_busy_gpus(wait=False) import nltk import numpy as np nltk.download('brown') nltk.download('universal_tagset') data = nltk.corpus.brown.tagged_sents(tagset='universal') all_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ'] data = np.array([ [(word.lower(),tag) for word,tag in sentence] for sentence in data ]) from sklearn.cross_validation import train_test_split train_data,test_data = train_test_split(data,test_size=0.25,random_state=42) from collections import Counter word_counts = Counter() for sentence in data: words,tags = zip(*sentence) word_counts.update(words) all_words = ['#EOS#','#UNK#']+list(list(zip(*word_counts.most_common(10000)))[0]) #print(all_words) #let's measure what fraction of data words are in the dictionary print("Coverage = %.5f"%(float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values()))) from collections import defaultdict word_to_id = defaultdict(lambda:1,{word:i for i,word in enumerate(all_words)}) tag_to_id = {tag:i for i,tag in enumerate(all_tags)} def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False): """Converts a list of names into rnn-digestable matrix with paddings added after the end""" max_len = max_len or max(map(len,lines)) matrix = np.empty([len(lines),max_len],dtype) matrix.fill(pad) for i in range(len(lines)): line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len] matrix[i,:len(line_ix)] = line_ix return matrix.T if time_major else matrix batch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]]) print("Word ids:") print(to_matrix(batch_words,word_to_id)) print("Tag ids:") print(to_matrix(batch_tags,tag_to_id)) import keras import keras.layers as L from keras.utils.np_utils import to_categorical BATCH_SIZE=32 def generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0): assert isinstance(sentences,np.ndarray),"Make sure sentences is q numpy array" while True: indices = np.random.permutation(np.arange(len(sentences))) for start in range(0,len(indices)-1,batch_size): batch_indices = indices[start:start+batch_size] batch_words,batch_tags = [],[] for sent in sentences[batch_indices]: words,tags = zip(*sent) batch_words.append(words) batch_tags.append(tags) batch_words = to_matrix(batch_words,word_to_id,max_len,pad) batch_tags = to_matrix(batch_tags,tag_to_id,max_len,pad) batch_tags_1hot = to_categorical(batch_tags,len(all_tags)).reshape(batch_tags.shape+(-1,)) yield batch_words,batch_tags_1hot def compute_test_accuracy(model): test_words,test_tags = zip(*[zip(*sentence) for sentence in test_data]) test_words,test_tags = to_matrix(test_words,word_to_id),to_matrix(test_tags,tag_to_id) #predict tag probabilities of shape [batch,time,n_tags] predicted_tag_probabilities = model.predict(test_words,verbose=1) predicted_tags = predicted_tag_probabilities.argmax(axis=-1) #compute accurary excluding padding numerator = np.sum(np.logical_and((predicted_tags == test_tags),(test_words != 0))) denominator = np.sum(test_words != 0) return float(numerator)/denominator class EvaluateAccuracy(keras.callbacks.Callback): def on_epoch_end(self,epoch,logs=None): sys.stdout.flush() print("\nMeasuring validation accuracy...") acc = compute_test_accuracy(self.model) print("\nValidation accuracy: %.5f\n"%acc) sys.stdout.flush() model = keras.models.Sequential() model = keras.models.Sequential() model.add(L.InputLayer([None],dtype='int32')) model.add(L.Embedding(len(all_words),50)) model.add(L.TimeDistributed(L.Dense(96,activation='tanh'))) model.add(L.Dropout(0.25)) model.add(L.TimeDistributed(L.Dense(96,activation='tanh'))) model.add(L.Dropout(0.25)) #model.add(L.Conv1D(32,3,padding='same',activation='tanh')) model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2))) model.add(L.TimeDistributed(L.Dense(128,activation='tanh'))) model.add(L.Dropout(0.25)) model.add(L.TimeDistributed(L.Dense(128,activation='tanh'))) model.add(L.Dropout(0.25)) model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2))) model.add(L.TimeDistributed(L.Dense(128,activation='tanh'))) model.add(L.Dropout(0.25)) model.add(L.TimeDistributed(L.Dense(128,activation='tanh'))) model.add(L.Dropout(0.25)) # # model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2))) model.add(L.Conv1D(128,2,padding='same',activation='tanh')) model.add(L.Dropout(0.2)) model.add(L.Conv1D(128,3,padding='same',activation='tanh')) model.add(L.Dropout(0.2)) model.add(L.Conv1D(128,4,padding='same',activation='tanh')) model.add(L.TimeDistributed(L.Dense(256,activation='tanh'))) model.add(L.Dropout(0.25)) #model.add(L.TimeDistributed(L.Dense(128,activation='tanh'))) #model.add(L.Dropout(0.25)) stepwise_dense = L.Dense(len(all_tags),activation='softmax') stepwise_dense = L.TimeDistributed(stepwise_dense) model.add(stepwise_dense) model.summary() model.compile('adam','categorical_crossentropy') model.fit_generator(generate_batches(train_data),len(train_data)/BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50,) acc = compute_test_accuracy(model) print("\nFinal accuracy: %.5f"%acc) model.save_weights("LSTM_gpu_trained_weights_1layer.h5")
[ "import sys\nsys.path.append(\"..\")\nimport helpers\nhelpers.mask_busy_gpus(wait=False)\n\n\n\nimport nltk\n\nimport numpy as np\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\n\ndata = np.array([ [(word.lower(),tag) for word,tag in sentence] for sentence in data ])\n\nfrom sklearn.cross_validation import train_test_split\ntrain_data,test_data = train_test_split(data,test_size=0.25,random_state=42)\n\nfrom collections import Counter\nword_counts = Counter()\nfor sentence in data:\n words,tags = zip(*sentence)\n \n word_counts.update(words)\n\nall_words = ['#EOS#','#UNK#']+list(list(zip(*word_counts.most_common(10000)))[0])\n#print(all_words)\n#let's measure what fraction of data words are in the dictionary\nprint(\"Coverage = %.5f\"%(float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values())))\n\nfrom collections import defaultdict\nword_to_id = defaultdict(lambda:1,{word:i for i,word in enumerate(all_words)})\ntag_to_id = {tag:i for i,tag in enumerate(all_tags)}\n\ndef to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix\n\nbatch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\n\nprint(\"Word ids:\")\nprint(to_matrix(batch_words,word_to_id))\nprint(\"Tag ids:\")\nprint(to_matrix(batch_tags,tag_to_id))\n\nimport keras\nimport keras.layers as L\n\nfrom keras.utils.np_utils import to_categorical\nBATCH_SIZE=32\ndef generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0):\n assert isinstance(sentences,np.ndarray),\"Make sure sentences is q numpy array\"\n \n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0,len(indices)-1,batch_size):\n batch_indices = indices[start:start+batch_size]\n batch_words,batch_tags = [],[]\n for sent in sentences[batch_indices]:\n words,tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n\n batch_words = to_matrix(batch_words,word_to_id,max_len,pad)\n batch_tags = to_matrix(batch_tags,tag_to_id,max_len,pad)\n\n batch_tags_1hot = to_categorical(batch_tags,len(all_tags)).reshape(batch_tags.shape+(-1,))\n yield batch_words,batch_tags_1hot\n \ndef compute_test_accuracy(model):\n test_words,test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words,test_tags = to_matrix(test_words,word_to_id),to_matrix(test_tags,tag_to_id)\n\n #predict tag probabilities of shape [batch,time,n_tags]\n predicted_tag_probabilities = model.predict(test_words,verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n\n #compute accurary excluding padding\n numerator = np.sum(np.logical_and((predicted_tags == test_tags),(test_words != 0)))\n denominator = np.sum(test_words != 0)\n return float(numerator)/denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n def on_epoch_end(self,epoch,logs=None):\n sys.stdout.flush()\n print(\"\\nMeasuring validation accuracy...\")\n acc = compute_test_accuracy(self.model)\n print(\"\\nValidation accuracy: %.5f\\n\"%acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\n\nmodel = keras.models.Sequential()\nmodel.add(L.InputLayer([None],dtype='int32'))\nmodel.add(L.Embedding(len(all_words),50))\nmodel.add(L.TimeDistributed(L.Dense(96,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#model.add(L.Conv1D(32,3,padding='same',activation='tanh'))\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\n\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#\n\n#\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\n\nmodel.add(L.Conv1D(128,2,padding='same',activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128,3,padding='same',activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128,4,padding='same',activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\n#model.add(L.Dropout(0.25))\n\nstepwise_dense = L.Dense(len(all_tags),activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\n\nmodel.summary()\nmodel.compile('adam','categorical_crossentropy')\n\nmodel.fit_generator(generate_batches(train_data),len(train_data)/BATCH_SIZE,\n callbacks=[EvaluateAccuracy()], epochs=50,)\n\n\nacc = compute_test_accuracy(model)\nprint(\"\\nFinal accuracy: %.5f\"%acc)\n\nmodel.save_weights(\"LSTM_gpu_trained_weights_1layer.h5\")\n", "import sys\nsys.path.append('..')\nimport helpers\nhelpers.mask_busy_gpus(wait=False)\nimport nltk\nimport numpy as np\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#', '#UNK#', 'ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.',\n 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\ndata = np.array([[(word.lower(), tag) for word, tag in sentence] for\n sentence in data])\nfrom sklearn.cross_validation import train_test_split\ntrain_data, test_data = train_test_split(data, test_size=0.25, random_state=42)\nfrom collections import Counter\nword_counts = Counter()\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\nall_words = ['#EOS#', '#UNK#'] + list(list(zip(*word_counts.most_common(\n 10000)))[0])\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\nfrom collections import defaultdict\nword_to_id = defaultdict(lambda : 1, {word: i for i, word in enumerate(\n all_words)})\ntag_to_id = {tag: i for i, tag in enumerate(all_tags)}\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\nbatch_words, batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\nimport keras\nimport keras.layers as L\nfrom keras.utils.np_utils import to_categorical\nBATCH_SIZE = 32\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\nmodel = keras.models.Sequential()\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nstepwise_dense = L.Dense(len(all_tags), activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\nacc = compute_test_accuracy(model)\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n", "<import token>\nsys.path.append('..')\n<import token>\nhelpers.mask_busy_gpus(wait=False)\n<import token>\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#', '#UNK#', 'ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.',\n 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\ndata = np.array([[(word.lower(), tag) for word, tag in sentence] for\n sentence in data])\n<import token>\ntrain_data, test_data = train_test_split(data, test_size=0.25, random_state=42)\n<import token>\nword_counts = Counter()\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\nall_words = ['#EOS#', '#UNK#'] + list(list(zip(*word_counts.most_common(\n 10000)))[0])\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\n<import token>\nword_to_id = defaultdict(lambda : 1, {word: i for i, word in enumerate(\n all_words)})\ntag_to_id = {tag: i for i, tag in enumerate(all_tags)}\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\nbatch_words, batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\n<import token>\nBATCH_SIZE = 32\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\nmodel = keras.models.Sequential()\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nstepwise_dense = L.Dense(len(all_tags), activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\nacc = compute_test_accuracy(model)\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n", "<import token>\nsys.path.append('..')\n<import token>\nhelpers.mask_busy_gpus(wait=False)\n<import token>\nnltk.download('brown')\nnltk.download('universal_tagset')\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\n<assignment token>\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\n<import token>\n<assignment token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<assignment token>\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\n<import token>\n<assignment token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<assignment token>\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\n<assignment token>\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\n<assignment token>\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n", "<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
307
8698aedc5c8671f46c73898a7188440254b79bbf
from abc import abstractmethod class Environment: @abstractmethod def __init__(self, agent): pass @abstractmethod def execute_step(self, n=1): pass @abstractmethod def execute_all(self): pass @abstractmethod def set_delay(self, delay): pass
[ "from abc import abstractmethod\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n\n", "from abc import abstractmethod\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n", "<import token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n", "<import token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n <function token>\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n", "<import token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n <function token>\n <function token>\n", "<import token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n <function token>\n <function token>\n <function token>\n", "<import token>\n\n\nclass Environment:\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n" ]
false
308
1be510e6715d21e814c48fe05496704e9a65d554
from end import Client c = Client()
[ "from end import Client\n\nc = Client()\n", "from end import Client\nc = Client()\n", "<import token>\nc = Client()\n", "<import token>\n<assignment token>\n" ]
false
309
6b727cdfc684db4ba919cd5390fe45de43a806fe
import glob import xarray as xr from model_diagnostics import * data_root = '../data/synthetic/standard/' var_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1'] eke = 0.01 ########################## output = [] diagnostic_functions = [basic_stats] for var in var_list: grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var)) for f in grid_files: output.append(analize_member(f, var,diagnostic_functions)) print("processing %s" %os.path.basename(f)) var = 'hs' diagnostic_functions = [hs_spectral_slope] grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var)) for f in grid_files: output.append(analize_member(f, var,diagnostic_functions)) print("processing %s" %os.path.basename(f)) var = 'cur' diagnostic_functions = [flow_stats] grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var)) for f in grid_files: output.append(analize_member(f, var, diagnostic_functions)) print("processing %s" %os.path.basename(f)) ds = xr.merge(output) df = ds.to_dataframe() df = df.reset_index() data = df.to_xarray() data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc'%eke, mode='w')
[ "import glob\nimport xarray as xr\n\nfrom model_diagnostics import *\n\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\n##########################\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\n for f in grid_files:\n output.append(analize_member(f, var,diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\nfor f in grid_files:\n output.append(analize_member(f, var,diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\n\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc'%eke, mode='w')\n", "import glob\nimport xarray as xr\nfrom model_diagnostics import *\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n", "<import token>\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n", "<import token>\n<assignment token>\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<assignment token>\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<assignment token>\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<assignment token>\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
310
c66f4ee5719f764c8c713c23815302c00b6fb9af
import os from cs50 import SQL from flask import Flask, flash, redirect, render_template, request, session from flask_session import Session from tempfile import mkdtemp from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError from werkzeug.security import check_password_hash, generate_password_hash from helpers import apology, login_required, lookup, usd # Configure application app = Flask(__name__) # Ensure templates are auto-reloaded app.config["TEMPLATES_AUTO_RELOAD"] = True # Ensure responses aren't cached @app.after_request def after_request(response): response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" response.headers["Expires"] = 0 response.headers["Pragma"] = "no-cache" return response # Custom filter app.jinja_env.filters["usd"] = usd # Configure session to use filesystem (instead of signed cookies) app.config["SESSION_FILE_DIR"] = mkdtemp() app.config["SESSION_PERMANENT"] = False app.config["SESSION_TYPE"] = "filesystem" Session(app) # Configure CS50 Library to use SQLite database db = SQL("sqlite:///finance.db") # Make sure API key is set if not os.environ.get("API_KEY"): raise RuntimeError("API_KEY not set") @app.route("/") @login_required def index(): """Show portfolio of stocks""" rows=db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions",session["user_id"]) cash=db.execute("SELECT cash FROM users WHERE id=?",session["user_id"]) cash_=cash[0]["cash"] #store all the data into a dict so its easier to pass in to html display=[] total_share=0 for row in rows: symbol=str(row["symbol"]) print(symbol) name=lookup(symbol)["name"] shares=int(row["amount"]) price=float(lookup(symbol)["price"]) total=float(shares) *price total_share+=total display.append({'symbol':symbol, 'name':name, 'shares':shares, 'price':price, 'total':total}) total_money=total_share+cash[0]["cash"] return render_template("index.html",display=display,total_money=total_money,cash=cash_) @app.route("/buy", methods=["GET", "POST"]) @login_required def buy(): """Buy shares of stock""" if request.method == "POST": # Ensure symbol was submitted if not request.form.get("symbol"): return apology("must provide symbol", 400) # Ensure shares was submitted elif not request.form.get("shares"): return apology("must provide shares", 400) if not request.form.get("shares").isdigit(): return apology("must be integer",400) elif int(request.form.get("shares"))<1 : return apology("must be positive integer", 400) elif lookup(request.form.get("symbol"))==None: return apology("Must be a valid symbol",400) #ensure money>price quote=lookup(request.form.get("symbol")) shares=request.form.get("shares") cash=db.execute("SELECT cash FROM users WHERE id=?",session["user_id"]) if cash[0]["cash"]<int(quote["price"])*int(shares): return apology("You can't affort this/these",400) #BUY, STORE DATA IN REPOSITORY AND RECORD #record this transaction db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))",session["user_id"],int(shares),quote["symbol"],float(quote["price"])) #deduct the cash total=int(quote["price"])*int(shares) db.execute("UPDATE users SET cash=cash- (?) WHERE id=?",total,session["user_id"]) return redirect("/") else: return render_template("buy.html") @app.route("/history") @login_required def history(): """Show history of transactions""" rows=db.execute("SELECT * FROM record ORDER BY t1") return render_template("history.html",rows=rows) @app.route("/login", methods=["GET", "POST"]) def login(): """Log user in""" # Forget any user_id session.clear() # User reached route via POST (as by submitting a form via POST) if request.method == "POST": # Ensure username was submitted if not request.form.get("username"): return apology("must provide username", 403) # Ensure password was submitted elif not request.form.get("password"): return apology("must provide password", 403) # Query database for username rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username")) # Ensure username exists and password is correct if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")): return apology("invalid username and/or password", 403) # Remember which user has logged in session["user_id"] = rows[0]["id"] # Redirect user to home page return redirect("/") # User reached route via GET (as by clicking a link or via redirect) else: return render_template("login.html") @app.route("/logout") def logout(): """Log user out""" # Forget any user_id session.clear() # Redirect user to login form return redirect("/") @app.route("/quote", methods=["GET", "POST"]) @login_required def quote(): """Get stock quote.""" if request.method=="POST": quote=lookup(request.form.get("symbol")) if quote==None: return apology("Invalid symbol",400) price=usd(quote["price"]) return render_template("quoted.html",quote=quote,price=price) else: return render_template("quote.html") @app.route("/register", methods=["GET", "POST"]) def register(): """Register user""" if request.method == "POST": # Ensure username was submitted if not request.form.get("username"): return apology("must provide username", 400) # Ensure password was submitted elif not request.form.get("password"): return apology("must provide password", 400) # Ensure comfirm password was submitted elif not request.form.get("confirmation"): return apology("must comfirm password", 400) # Ensure password matches elif request.form.get("confirmation") != request.form.get("password"): return apology("Password not matches",400) # Ensure username is new(unique) rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username")) if len(rows) != 0: return apology("username used", 400) db.execute("INSERT INTO users (username,hash) VALUES (?,?)",request.form.get("username"),generate_password_hash(request.form.get("password"))) # Redirect user to home page return redirect("/") else: return render_template("register.html") @app.route("/sell", methods=["GET", "POST"]) @login_required def sell(): """Sell shares of stock""" if request.method=='POST': #parameter is not filled if not request.form.get("shares"): return apology("Please enter how much u want to sell",400) #check if shares(amount) that are going to be sell less than owner's share. sell=request.form.get("symbol") shares=request.form.get("shares") amount=db.execute("SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions",session["user_id"],sell) if amount[0]["amount"]<int(shares): return apology("You dont own that much shares",400) #record sell and add cash amount quote=lookup(sell) price=quote["price"] total=int(price)*int(shares) db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))",session["user_id"],(int(shares)*-1),quote["symbol"],price) db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?",total,session["user_id"]) return redirect("/") else: rows=db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions",session["user_id"]) return render_template("sell.html",rows=rows) @app.route("/HAX", methods=["GET", "POST"]) @login_required def HAX(): #add free monei boiiii if request.method=="POST": total=request.form.get("HAX") db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?",total,session["user_id"]) flash(u'HAX SUCCESSFULLY ACTIVATED!!!') return redirect("/") else: return render_template("HAX.html") def errorhandler(e): """Handle error""" if not isinstance(e, HTTPException): e = InternalServerError() return apology(e.name, e.code) # Listen for errors for code in default_exceptions: app.errorhandler(code)(errorhandler)
[ "import os\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom helpers import apology, login_required, lookup, usd\n\n# Configure application\napp = Flask(__name__)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n\n# Ensure responses aren't cached\[email protected]_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# Custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\n# Make sure API key is set\nif not os.environ.get(\"API_KEY\"):\n raise RuntimeError(\"API_KEY not set\")\n\n\[email protected](\"/\")\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n cash_=cash[0][\"cash\"]\n\n #store all the data into a dict so its easier to pass in to html\n display=[]\n total_share=0\n for row in rows:\n symbol=str(row[\"symbol\"])\n print(symbol)\n name=lookup(symbol)[\"name\"]\n shares=int(row[\"amount\"])\n price=float(lookup(symbol)[\"price\"])\n total=float(shares) *price\n total_share+=total\n display.append({'symbol':symbol, 'name':name, 'shares':shares, 'price':price, 'total':total})\n\n total_money=total_share+cash[0][\"cash\"]\n return render_template(\"index.html\",display=display,total_money=total_money,cash=cash_)\n\n\n\[email protected](\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n elif not request.form.get(\"shares\"):\n return apology(\"must provide shares\", 400)\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be integer\",400)\n\n elif int(request.form.get(\"shares\"))<1 :\n return apology(\"must be positive integer\", 400)\n\n elif lookup(request.form.get(\"symbol\"))==None:\n return apology(\"Must be a valid symbol\",400)\n\n #ensure money>price\n quote=lookup(request.form.get(\"symbol\"))\n shares=request.form.get(\"shares\")\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n if cash[0][\"cash\"]<int(quote[\"price\"])*int(shares):\n return apology(\"You can't affort this/these\",400)\n\n #BUY, STORE DATA IN REPOSITORY AND RECORD\n\n #record this transaction\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\",session[\"user_id\"],int(shares),quote[\"symbol\"],float(quote[\"price\"]))\n\n #deduct the cash\n total=int(quote[\"price\"])*int(shares)\n db.execute(\"UPDATE users SET cash=cash- (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")\n\[email protected](\"/history\")\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows=db.execute(\"SELECT * FROM record ORDER BY t1\")\n return render_template(\"history.html\",rows=rows)\n\n\[email protected](\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user in\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = ?\", request.form.get(\"username\"))\n\n # Ensure username exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid username and/or password\", 403)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\[email protected](\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\[email protected](\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method==\"POST\":\n quote=lookup(request.form.get(\"symbol\"))\n if quote==None:\n return apology(\"Invalid symbol\",400)\n price=usd(quote[\"price\"])\n return render_template(\"quoted.html\",quote=quote,price=price)\n else:\n return render_template(\"quote.html\")\n\[email protected](\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 400)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n\n # Ensure comfirm password was submitted\n elif not request.form.get(\"confirmation\"):\n return apology(\"must comfirm password\", 400)\n\n # Ensure password matches\n elif request.form.get(\"confirmation\") != request.form.get(\"password\"):\n return apology(\"Password not matches\",400)\n\n # Ensure username is new(unique)\n rows = db.execute(\"SELECT * FROM users WHERE username = ?\", request.form.get(\"username\"))\n if len(rows) != 0:\n return apology(\"username used\", 400)\n\n db.execute(\"INSERT INTO users (username,hash) VALUES (?,?)\",request.form.get(\"username\"),generate_password_hash(request.form.get(\"password\")))\n\n\n # Redirect user to home page\n return redirect(\"/\")\n\n\n else:\n return render_template(\"register.html\")\n\n\[email protected](\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method=='POST':\n #parameter is not filled\n if not request.form.get(\"shares\"):\n return apology(\"Please enter how much u want to sell\",400)\n #check if shares(amount) that are going to be sell less than owner's share.\n sell=request.form.get(\"symbol\")\n shares=request.form.get(\"shares\")\n amount=db.execute(\"SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions\",session[\"user_id\"],sell)\n if amount[0][\"amount\"]<int(shares):\n return apology(\"You dont own that much shares\",400)\n\n #record sell and add cash amount\n quote=lookup(sell)\n price=quote[\"price\"]\n total=int(price)*int(shares)\n\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\",session[\"user_id\"],(int(shares)*-1),quote[\"symbol\"],price)\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n\n return render_template(\"sell.html\",rows=rows)\n\n\n\[email protected](\"/HAX\", methods=[\"GET\", \"POST\"])\n@login_required\ndef HAX():\n #add free monei boiiii\n if request.method==\"POST\":\n total=request.form.get(\"HAX\")\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n\n return redirect(\"/\")\n\n else:\n return render_template(\"HAX.html\")\n\n\n\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n# Listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n", "import os\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom helpers import apology, login_required, lookup, usd\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\napp.jinja_env.filters['usd'] = usd\napp.config['SESSION_FILE_DIR'] = mkdtemp()\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\ndb = SQL('sqlite:///finance.db')\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n", "<import token>\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\napp.jinja_env.filters['usd'] = usd\napp.config['SESSION_FILE_DIR'] = mkdtemp()\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\ndb = SQL('sqlite:///finance.db')\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n", "<import token>\n<assignment token>\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\n<assignment token>\nSession(app)\n<assignment token>\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n", "<import token>\n<assignment token>\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n<function token>\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n<function token>\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\n<function token>\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n<function token>\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\n<function token>\n<function token>\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n<function token>\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n<function token>\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n<function token>\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\n<function token>\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\n<function token>\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
311
097a87f7f1346e5db1599e59680232912348aef7
# -*- coding:utf-8 -*- from odoo import api, models, fields, _ class hrsalaryRule(models.Model): _inherit = "hr.salary.rule" is_tax_fdfp = fields.Boolean("Est un impôt FDFP")
[ "# -*- coding:utf-8 -*-\r\n\r\nfrom odoo import api, models, fields, _\r\n\r\n\r\nclass hrsalaryRule(models.Model):\r\n _inherit = \"hr.salary.rule\"\r\n\r\n is_tax_fdfp = fields.Boolean(\"Est un impôt FDFP\")", "from odoo import api, models, fields, _\n\n\nclass hrsalaryRule(models.Model):\n _inherit = 'hr.salary.rule'\n is_tax_fdfp = fields.Boolean('Est un impôt FDFP')\n", "<import token>\n\n\nclass hrsalaryRule(models.Model):\n _inherit = 'hr.salary.rule'\n is_tax_fdfp = fields.Boolean('Est un impôt FDFP')\n", "<import token>\n\n\nclass hrsalaryRule(models.Model):\n <assignment token>\n <assignment token>\n", "<import token>\n<class token>\n" ]
false
312
a126b1775ffe1ba1aebc288ce17fac8ada0b0756
import cv2 import numpy as np import pandas as pd import tkinter as tk import random from tkinter import * from tkinter import ttk from tkinter import messagebox from tkinter import Scale,Tk from tkinter.ttk import Notebook refPt = [] PtBGR=[] r=[] g=[] b=[] refPt = [] Serial=[] PtBGR=[] r1=[] r2=[] r3=[] r4=[] rate=[] rate2=[] rate3=[] r6=[] r7=[] r8=[] r9=[] add=[] add2=[] add3=[] color_name=[] locate=[] brand=[] boolean=False root = tk.Tk() root.geometry("400x200") root.configure(background='white') def quitScreen(): messagebox.showinfo("collecting data", "點擊視窗開始分析") root.destroy() root2=Tk() root2.destroy() def getTextInput(): global result,result2 result=text.get(1.0, tk.END+"-1c") result2=text2.get(1.0, tk.END+"-1c") img = PhotoImage(file="buttons/QJsmall.png") panel = tk.Label(root, image = img) panel.grid(row=0,column=0,columnspan=3) labelmode = tk.Label(root,text = "請輸入圖片完整名稱\n ex:104432 w7.jpg",bg="white") labelmode.configure(font=("微軟正黑體", 10)) labelmode.grid(row=1) text=tk.Text(root, width=20,height=1) text.insert("insert",".jpg") text.configure(font=("微軟正黑體", 10)) text.grid(row=1,column=2) labelmode2 = tk.Label(root,text = "請輸入讀取資料庫名稱\n ex:PureColorBig.csv",bg="white") labelmode2.configure(font=("微軟正黑體", 10)) labelmode2.grid(row=2) text2=tk.Text(root, width=20,height=1) text2.insert("insert","PureColorBig.csv") text2.configure(font=("微軟正黑體", 10)) text2.grid(row=2,column=2) img_confirm=PhotoImage(file="buttons/confirm.png") img_start=PhotoImage(file="buttons/start.png") btnRead=tk.Button(root, image=img_confirm,text=" ",relief='flat', command=getTextInput) btnRead.grid(row=5,column=1) btnRead2=tk.Button(root, image=img_start,text=" ",relief='flat', command=quitScreen) btnRead2.grid(row=5,column=2) root.mainloop() def Result_Print(): window=Tk() window.title("分析結果") window.geometry("600x900") frame2=Frame(window) frame2.pack(fill="both") tablayout=Notebook(frame2) tablayout2=Notebook(frame2) #交叉配對 ntab1=Frame(tablayout2) ntab1.pack(fill="both") for row in range(len(name_n)): for column in range(1): label=Label(ntab1,width=25,height=2,text=name_n[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=column,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) for row in range(len(name_n)): for column in range(1): label=Label(ntab1,width=5,height=2,text="%s" %rate_n[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) for row in range(len(name_n)): for column in range(1): label=Label(ntab1,width=12,height=2,text="% 相似程度",bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=2,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) tablayout2.add(ntab1,text="交叉配對結果") ntab2=Frame(tablayout2) ntab2.pack(fill="both") for row in range(len(ncol)): for column in range(1): label=Label(ntab2,width=22,height=1,text=ncol[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) for row in range(len(ncol)): for column in range(1): label=Label(ntab2,width=22,height=1,text=row_nf3[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) tablayout2.add(ntab2,text="配方1") ntab3=Frame(tablayout2) ntab3.pack(fill="both") for row in range(len(ncol)): for column in range(1): label=Label(ntab3,width=22,height=1,text=ncol[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) for row in range(len(ncol)): for column in range(1): label=Label(ntab3,width=22,height=1,text=row_nf32[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) tablayout2.add(ntab3,text="配方2") ntab4=Frame(tablayout2) ntab4.pack(fill="both") for row in range(len(ncol)): for column in range(1): label=Label(ntab4,width=22,height=1,text=ncol[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) for row in range(len(ncol)): for column in range(1): label=Label(ntab4,width=22,height=1,text=row_nf33[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) tablayout2.add(ntab4,text="配方3") ntab5=Frame(tablayout2) ntab5.pack(fill="both") for row in range(len(ncol)): for column in range(1): label=Label(ntab5,width=22,height=1,text=ncol[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) for row in range(len(ncol)): for column in range(1): label=Label(ntab5,width=22,height=1,text=row_nf3[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) ntab1.grid_columnconfigure(column,weight=1) tablayout2.add(ntab5,text="最接近配方") #顏色分類 tab1=Frame(tablayout) tab1.pack(fill="both") for row in range(len(name)): for column in range(1): label=Label(tab1,width=25,height=2,text=name[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=column,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) for row in range(len(name)): for column in range(1): label=Label(tab1,width=5,height=2,text="%s" %rate[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) for row in range(len(name)): for column in range(1): label=Label(tab1,width=12,height=2,text="% 相似程度",bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=2,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) tablayout.add(tab1,text="顏色分類結果") tab2=Frame(tablayout) tab2.pack(fill="both") for row in range(len(col)): for column in range(1): label=Label(tab2,width=22,height=1,text=col[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) for row in range(len(col)): for column in range(1): label=Label(tab2,width=22,height=1,text=row_df3[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) tablayout.add(tab2,text="配方1") tab3=Frame(tablayout) tab3.pack(fill="both") for row in range(len(col)): for column in range(1): label=Label(tab3,width=22,height=1,text=col[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) for row in range(len(col)): for column in range(1): label=Label(tab3,width=22,height=1,text=row_df32[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) tablayout.add(tab3,text="配方2") tab4=Frame(tablayout) tab4.pack(fill="both") for row in range(len(col)): for column in range(1): label=Label(tab4,width=22,height=1,text=col[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) for row in range(len(col)): for column in range(1): label=Label(tab4,width=22,height=1,text=row_df33[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) tablayout.add(tab4,text="配方3") tab5=Frame(tablayout) tab5.pack(fill="both") for row in range(len(col)): for column in range(1): label=Label(tab5,width=22,height=1,text=col[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=0,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) for row in range(len(col)): for column in range(1): label=Label(tab5,width=22,height=1,text=row_text[row],bg="black",fg="white",padx=1,pady=1) label.grid(row=row,column=1,sticky="nsew",padx=1,pady=1) tab1.grid_columnconfigure(column,weight=1) tablayout.add(tab5,text="最接近配方") tablayout.pack() tablayout2.pack() window.mainloop() def CircleCallback(event,x,y,flags,param): n=8 global refPt,PtBGR,w,h,Serial,r1,r2,r3,r4,rate,rate2,rate3,r6,r7,r8,r9,add,add2,add3,color,b,g,r,df3,name,rate,col,row_text global row_df3,row_df32,row_df33,row_text2,row_nf3,row_nf32,nf3,row_nf33,name_n,rate_n,ncol if event == cv2.EVENT_LBUTTONDOWN: #下面n代表取樣點數 若n越大則越精準一般不建議超過1000 n=500 for c in range(0,n): c+=1 #若n改變下面499改為n-1 ranx=(random.randint(0,499)) rany=(random.randint(0,499)) refPt.append((ranx,rany)) b, g, r = img[ranx,rany] PtBGR.append((b,g,r)) #print(PtBGR[0:n]) b=[x[0] for x in PtBGR] g=[x[1] for x in PtBGR] r=[x[2] for x in PtBGR] if len(refPt)==n: BAvr=(round(sum(b[0:n])/n)) GAvr=(round(sum(g[0:n])/n)) RAvr=(round(sum(r[0:n])/n)) SumRGB=(BAvr+GAvr+RAvr) SumAvr=(round(SumRGB/3)) color_def(BAvr,GAvr,RAvr) color_name.append(color) AvrRGB={'R':RAvr,'G':GAvr,'B':BAvr,'Sum':SumRGB,'Avr':SumAvr,'color':color_name} df_test = pd.DataFrame(AvrRGB,index=[0]) dfread = pd.read_csv(".data base\\%s" %(result2)) dfread['A']= round((dfread['R'] + dfread['G'] + dfread['B'])/3) dfread['S'] = dfread['R'] + dfread['G'] + dfread['B'] #交叉比對法 nf=pd.DataFrame(list(zip(r,g,b)),columns=['R','G','B']) nfread=dfread[['Serial no','R','G','B']] loan=pd.merge(nf,nfread) group=loan.groupby('Serial no') Newnf=group.count() Newnf['P']=round((Newnf['R']/Newnf['R'].sum())* 100) Newnf=Newnf.sort_values(by=['R'],ascending=False) Rate=Newnf['P'].tolist() Newnf.columns = [' '.join(col).strip() for col in Newnf.columns.values] nf2=pd.DataFrame(Newnf.to_records()) nf2=nf2.head(5) print(nf2) if(len(nf2['Serial no'])==0): i=0 j=0 k=0 elif(len(nf2['Serial no'])==1): i=nf2.at[0,'Serial no'] j=0 k=0 elif(len(nf2['Serial no'])==2): i=nf2.at[0,'Serial no'] j=nf2.at[1,'Serial no'] k=0 else: i=nf2.at[0,'Serial no'] j=nf2.at[1,'Serial no'] k=nf2.at[2,'Serial no'] print(k) nf3=dfread.loc[(dfread['Serial no']==i)].head(1) nf4=dfread.loc[(dfread['Serial no']==j)].head(1) nf5=dfread.loc[(dfread['Serial no']==k)].head(1) nf3=nf3.drop(['R','G','B','color','A','S'],axis=1) nf4=nf4.drop(['R','G','B','color','A','S'],axis=1) nf5=nf5.drop(['R','G','B','color','A','S'],axis=1) nf=pd.concat([nf3, nf4,nf5]) nf.to_csv(".data base\\test_result2.csv",index=False,encoding="utf_8_sig") print(nf) ncol=list(nf.columns) if(len(nf2['Serial no'])==0): root=tk.Tk() root.withdraw() messagebox.showinfo("失敗", "未找到符合資料") elif(len(nf2['Serial no'])==1): row_nf3=nf3.iloc[0].tolist() row_nf32=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x'] row_nf33=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x'] elif(len(nf2['Serial no'])==2): row_nf3=nf3.iloc[0].tolist() row_nf32=nf4.iloc[0].tolist() row_nf33=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x'] else: row_nf3=nf3.iloc[0].tolist() row_nf32=nf4.iloc[0].tolist() print(row_nf32) row_nf33=nf5.iloc[0].tolist() name_n=nf['Serial no'].tolist() rate_n=Rate #顏色分類法 #(可以改)當需要寬鬆一點的比對,刪除下面一段的上下兩個''' ''' newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)] newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))] newdf=pd.concat([newdf1, newdf2]) ''' #(可以改)當需要嚴格一點的比對,刪除下面一段的上下兩個''' ''' newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)] newdf=newdf.loc[(newdf['color']==color)] ''' #並在下面一行的開頭加上# newdf=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)|(dfread['S']==SumRGB)] newdf.insert(1,'Rdif',newdf[['R']].add(-RAvr)) newdf.insert(2,'Gdif',newdf[['G']].add(-GAvr)) newdf.insert(3,'Bdif',newdf[['B']].add(-BAvr)) newdf.insert(4,'Adif',abs(newdf[['A']].add(-SumAvr))) newdf.insert(5,'Sdif',abs(newdf[['S']].add(-SumRGB))) df=newdf.sort_values(by=['Sdif', 'Adif'], ascending=True).head(100) df.insert(1,'dalta',abs(df['Rdif']+df['Gdif']+df['Bdif'])) df=df.sort_values(by=['dalta'],ascending=True) data=df[['Serial no','color']] group=data.groupby('Serial no') datacount=group.count() df=df.merge(datacount,left_on='Serial no',right_index=True) df=df.sort_values(by=['color_y'],ascending=False) df3=df.drop_duplicates('Serial no', keep='first', inplace=False).head() print(df3) df3.to_csv(".data base\\test_result.csv",index=False,encoding="utf_8_sig") if df3.empty ==True: root=tk.Tk() root.withdraw() messagebox.showinfo("失敗", "未找到符合資料") elif len(df3)<=2: root=tk.Tk() root.withdraw() messagebox.showinfo("失敗", "只找到少數資料\n 已存在test_result") else: Zero=df3.loc[(df3['Rdif']==0)&(df3['Gdif']==0)&(df3['Bdif']==0)] Zero=Zero.head(3) if Zero.empty==False: Zero=Zero.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1) name=df3['Serial no'].tolist() rate=df3['color_y'].tolist() col=list(Zero.columns) row_text=Zero.iloc[0].tolist() df3=df3.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1) row_df3=df3.iloc[0].tolist() row_df32=df3.iloc[1].tolist() row_df33=df3.iloc[2].tolist() Result_Print() print('0') print(Zero) else: filtdf=df3.loc[(df3['A']>=SumAvr)] filtdf=filtdf.sort_values(by=['Rdif','Gdif','Bdif']).head() Neg_filtdf=df3.loc[(df3['A']<SumAvr)] Neg_filtdf=Neg_filtdf.sort_values(by=['Rdif','Gdif','Bdif']).head() if Neg_filtdf.empty==True and filtdf.empty ==True: root=tk.Tk() root.withdraw() messagebox.showinfo("失敗", "未找到符合資料") else: filtdf=filtdf.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1) name=df3['Serial no'].tolist() rate=df3['color_y'].tolist() col=list(filtdf.columns) row_text=filtdf.iloc[0].tolist() df3=df3.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1) row_df3=df3.iloc[0].tolist() row_df32=df3.iloc[1].tolist() row_df33=df3.iloc[2].tolist() Result_Print() print("最接近的為1",filtdf.head(1)) def color_def(BAvr,GAvr,RAvr): global color if abs(int(BAvr)-int(GAvr))<=1 and abs(int(BAvr)-int(RAvr))<=1: color='White' return color elif BAvr>=GAvr and BAvr>=RAvr: if BAvr-GAvr>3 and BAvr-RAvr>=3: color='Blue' return color elif BAvr-GAvr<3: color='Cyan' return color else: color='Purple' return color elif GAvr>=RAvr and GAvr>=BAvr: if GAvr-RAvr>3 or GAvr-BAvr>3: color='Green' return color elif GAvr-RAvr<3: color='Yellow' return color else: color='Cyan' return color elif RAvr>=GAvr and RAvr>=BAvr: if RAvr-GAvr>=3 and RAvr-BAvr>=3: color='Red' return color elif RAvr-GAvr<3: color='Yellow' return color else: color='Purple' return color else: color='White' #img=cv2.imdecode(np.fromfile(r"D:\桌面\JA Material\JA-material\pure\%s" % (result),dtype=np.uint8),-1) img=cv2.imdecode(np.fromfile(r".pure\%s" % (result),dtype=np.uint8),-1) cv2.namedWindow('mouse_callback') # bind the callback function to window cv2.setMouseCallback('mouse_callback',CircleCallback) def main(): while (True): cv2.imshow('mouse_callback',img) if cv2.waitKey(20) == 27: break cv2.destroyAllWindows() if __name__ == "__main__": main()
[ "import cv2\nimport numpy as np\nimport pandas as pd\nimport tkinter as tk\nimport random\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkinter import Scale,Tk\nfrom tkinter.ttk import Notebook\n\nrefPt = []\nPtBGR=[]\nr=[]\ng=[]\nb=[]\nrefPt = []\nSerial=[]\nPtBGR=[]\nr1=[]\nr2=[]\nr3=[]\nr4=[]\nrate=[]\nrate2=[]\nrate3=[]\nr6=[]\nr7=[]\nr8=[]\nr9=[]\nadd=[]\nadd2=[]\nadd3=[]\ncolor_name=[]\nlocate=[]\nbrand=[]\nboolean=False\n\n\nroot = tk.Tk()\nroot.geometry(\"400x200\")\nroot.configure(background='white')\n\ndef quitScreen():\n messagebox.showinfo(\"collecting data\", \"點擊視窗開始分析\")\n root.destroy()\n root2=Tk()\n root2.destroy()\n \ndef getTextInput():\n global result,result2\n result=text.get(1.0, tk.END+\"-1c\")\n result2=text2.get(1.0, tk.END+\"-1c\")\n\nimg = PhotoImage(file=\"buttons/QJsmall.png\")\npanel = tk.Label(root, image = img)\npanel.grid(row=0,column=0,columnspan=3)\n\nlabelmode = tk.Label(root,text = \"請輸入圖片完整名稱\\n ex:104432 w7.jpg\",bg=\"white\")\nlabelmode.configure(font=(\"微軟正黑體\", 10))\nlabelmode.grid(row=1)\ntext=tk.Text(root, width=20,height=1)\ntext.insert(\"insert\",\".jpg\")\ntext.configure(font=(\"微軟正黑體\", 10))\ntext.grid(row=1,column=2)\n\nlabelmode2 = tk.Label(root,text = \"請輸入讀取資料庫名稱\\n ex:PureColorBig.csv\",bg=\"white\")\nlabelmode2.configure(font=(\"微軟正黑體\", 10))\nlabelmode2.grid(row=2)\ntext2=tk.Text(root, width=20,height=1)\ntext2.insert(\"insert\",\"PureColorBig.csv\")\ntext2.configure(font=(\"微軟正黑體\", 10))\ntext2.grid(row=2,column=2)\n\nimg_confirm=PhotoImage(file=\"buttons/confirm.png\")\nimg_start=PhotoImage(file=\"buttons/start.png\")\nbtnRead=tk.Button(root, image=img_confirm,text=\" \",relief='flat', \n command=getTextInput)\n\nbtnRead.grid(row=5,column=1)\n\nbtnRead2=tk.Button(root, image=img_start,text=\" \",relief='flat', \n command=quitScreen)\n\nbtnRead2.grid(row=5,column=2)\n\nroot.mainloop()\n\n\n\n\ndef Result_Print():\n window=Tk()\n window.title(\"分析結果\")\n window.geometry(\"600x900\")\n \n frame2=Frame(window)\n frame2.pack(fill=\"both\")\n\n \n tablayout=Notebook(frame2)\n tablayout2=Notebook(frame2)\n\n\n #交叉配對\n ntab1=Frame(tablayout2)\n ntab1.pack(fill=\"both\")\n for row in range(len(name_n)):\n for column in range(1):\n label=Label(ntab1,width=25,height=2,text=name_n[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=column,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label=Label(ntab1,width=5,height=2,text=\"%s\" %rate_n[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(name_n)):\n for column in range(1):\n label=Label(ntab1,width=12,height=2,text=\"% 相似程度\",bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=2,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n tablayout2.add(ntab1,text=\"交叉配對結果\")\n\n ntab2=Frame(tablayout2)\n ntab2.pack(fill=\"both\")\n \n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab2,width=22,height=1,text=ncol[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab2,width=22,height=1,text=row_nf3[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n \n tablayout2.add(ntab2,text=\"配方1\")\n\n ntab3=Frame(tablayout2)\n ntab3.pack(fill=\"both\")\n \n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab3,width=22,height=1,text=ncol[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab3,width=22,height=1,text=row_nf32[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n \n tablayout2.add(ntab3,text=\"配方2\")\n\n ntab4=Frame(tablayout2)\n ntab4.pack(fill=\"both\")\n \n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab4,width=22,height=1,text=ncol[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab4,width=22,height=1,text=row_nf33[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n \n tablayout2.add(ntab4,text=\"配方3\")\n\n ntab5=Frame(tablayout2)\n ntab5.pack(fill=\"both\")\n \n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab5,width=22,height=1,text=ncol[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab5,width=22,height=1,text=row_nf3[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n \n tablayout2.add(ntab5,text=\"最接近配方\")\n\n\n\n #顏色分類\n tab1=Frame(tablayout)\n tab1.pack(fill=\"both\")\n for row in range(len(name)):\n for column in range(1):\n label=Label(tab1,width=25,height=2,text=name[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=column,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n for row in range(len(name)):\n for column in range(1):\n label=Label(tab1,width=5,height=2,text=\"%s\" %rate[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(name)):\n for column in range(1):\n label=Label(tab1,width=12,height=2,text=\"% 相似程度\",bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=2,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n tablayout.add(tab1,text=\"顏色分類結果\")\n \n tab2=Frame(tablayout)\n tab2.pack(fill=\"both\")\n \n for row in range(len(col)):\n for column in range(1):\n label=Label(tab2,width=22,height=1,text=col[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(col)):\n for column in range(1):\n label=Label(tab2,width=22,height=1,text=row_df3[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n \n tablayout.add(tab2,text=\"配方1\")\n\n tab3=Frame(tablayout)\n tab3.pack(fill=\"both\")\n \n for row in range(len(col)):\n for column in range(1):\n label=Label(tab3,width=22,height=1,text=col[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(col)):\n for column in range(1):\n label=Label(tab3,width=22,height=1,text=row_df32[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n \n tablayout.add(tab3,text=\"配方2\")\n\n tab4=Frame(tablayout)\n tab4.pack(fill=\"both\")\n \n for row in range(len(col)):\n for column in range(1):\n label=Label(tab4,width=22,height=1,text=col[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(col)):\n for column in range(1):\n label=Label(tab4,width=22,height=1,text=row_df33[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n \n tablayout.add(tab4,text=\"配方3\")\n\n tab5=Frame(tablayout)\n tab5.pack(fill=\"both\")\n \n for row in range(len(col)):\n for column in range(1):\n label=Label(tab5,width=22,height=1,text=col[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(col)):\n for column in range(1):\n label=Label(tab5,width=22,height=1,text=row_text[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n \n tablayout.add(tab5,text=\"最接近配方\")\n\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n \n\n\ndef CircleCallback(event,x,y,flags,param):\n n=8\n global refPt,PtBGR,w,h,Serial,r1,r2,r3,r4,rate,rate2,rate3,r6,r7,r8,r9,add,add2,add3,color,b,g,r,df3,name,rate,col,row_text\n global row_df3,row_df32,row_df33,row_text2,row_nf3,row_nf32,nf3,row_nf33,name_n,rate_n,ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n\n #下面n代表取樣點數 若n越大則越精準一般不建議超過1000\n n=500\n for c in range(0,n):\n c+=1\n #若n改變下面499改為n-1\n ranx=(random.randint(0,499))\n rany=(random.randint(0,499))\n refPt.append((ranx,rany))\n b, g, r = img[ranx,rany]\n PtBGR.append((b,g,r)) \n #print(PtBGR[0:n])\n b=[x[0] for x in PtBGR]\n g=[x[1] for x in PtBGR]\n r=[x[2] for x in PtBGR]\n if len(refPt)==n:\n BAvr=(round(sum(b[0:n])/n))\n GAvr=(round(sum(g[0:n])/n))\n RAvr=(round(sum(r[0:n])/n))\n SumRGB=(BAvr+GAvr+RAvr)\n SumAvr=(round(SumRGB/3))\n color_def(BAvr,GAvr,RAvr)\n color_name.append(color)\n AvrRGB={'R':RAvr,'G':GAvr,'B':BAvr,'Sum':SumRGB,'Avr':SumAvr,'color':color_name}\n df_test = pd.DataFrame(AvrRGB,index=[0])\n dfread = pd.read_csv(\".data base\\\\%s\" %(result2))\n dfread['A']= round((dfread['R'] + dfread['G'] + dfread['B'])/3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n\n #交叉比對法\n nf=pd.DataFrame(list(zip(r,g,b)),columns=['R','G','B'])\n nfread=dfread[['Serial no','R','G','B']]\n loan=pd.merge(nf,nfread)\n group=loan.groupby('Serial no')\n Newnf=group.count()\n Newnf['P']=round((Newnf['R']/Newnf['R'].sum())* 100)\n Newnf=Newnf.sort_values(by=['R'],ascending=False)\n Rate=Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.columns.values]\n nf2=pd.DataFrame(Newnf.to_records())\n nf2=nf2.head(5)\n \n print(nf2)\n if(len(nf2['Serial no'])==0):\n i=0\n j=0\n k=0\n elif(len(nf2['Serial no'])==1):\n i=nf2.at[0,'Serial no']\n j=0\n k=0\n elif(len(nf2['Serial no'])==2):\n i=nf2.at[0,'Serial no']\n j=nf2.at[1,'Serial no']\n k=0\n else:\n i=nf2.at[0,'Serial no']\n j=nf2.at[1,'Serial no']\n k=nf2.at[2,'Serial no']\n print(k)\n nf3=dfread.loc[(dfread['Serial no']==i)].head(1)\n nf4=dfread.loc[(dfread['Serial no']==j)].head(1)\n nf5=dfread.loc[(dfread['Serial no']==k)].head(1)\n nf3=nf3.drop(['R','G','B','color','A','S'],axis=1)\n nf4=nf4.drop(['R','G','B','color','A','S'],axis=1)\n nf5=nf5.drop(['R','G','B','color','A','S'],axis=1)\n nf=pd.concat([nf3, nf4,nf5])\n nf.to_csv(\".data base\\\\test_result2.csv\",index=False,encoding=\"utf_8_sig\")\n print(nf)\n ncol=list(nf.columns)\n if(len(nf2['Serial no'])==0):\n root=tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"失敗\", \"未找到符合資料\")\n elif(len(nf2['Serial no'])==1):\n row_nf3=nf3.iloc[0].tolist()\n row_nf32=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']\n row_nf33=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']\n\n elif(len(nf2['Serial no'])==2):\n row_nf3=nf3.iloc[0].tolist()\n row_nf32=nf4.iloc[0].tolist()\n row_nf33=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']\n \n else:\n row_nf3=nf3.iloc[0].tolist()\n row_nf32=nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33=nf5.iloc[0].tolist()\n name_n=nf['Serial no'].tolist()\n rate_n=Rate\n \n \n #顏色分類法\n #(可以改)當需要寬鬆一點的比對,刪除下面一段的上下兩個'''\n \n '''\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n '''\n\n #(可以改)當需要嚴格一點的比對,刪除下面一段的上下兩個'''\n '''\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n '''\n\n #並在下面一行的開頭加上#\n newdf=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n\n \n newdf.insert(1,'Rdif',newdf[['R']].add(-RAvr))\n newdf.insert(2,'Gdif',newdf[['G']].add(-GAvr))\n newdf.insert(3,'Bdif',newdf[['B']].add(-BAvr))\n newdf.insert(4,'Adif',abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5,'Sdif',abs(newdf[['S']].add(-SumRGB)))\n df=newdf.sort_values(by=['Sdif', 'Adif'], ascending=True).head(100)\n df.insert(1,'dalta',abs(df['Rdif']+df['Gdif']+df['Bdif']))\n df=df.sort_values(by=['dalta'],ascending=True)\n data=df[['Serial no','color']]\n group=data.groupby('Serial no')\n datacount=group.count()\n df=df.merge(datacount,left_on='Serial no',right_index=True)\n df=df.sort_values(by=['color_y'],ascending=False)\n df3=df.drop_duplicates('Serial no', keep='first', inplace=False).head()\n print(df3)\n df3.to_csv(\".data base\\\\test_result.csv\",index=False,encoding=\"utf_8_sig\")\n if df3.empty ==True:\n root=tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"失敗\", \"未找到符合資料\")\n \n elif len(df3)<=2:\n \n root=tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"失敗\", \"只找到少數資料\\n 已存在test_result\")\n \n else:\n Zero=df3.loc[(df3['Rdif']==0)&(df3['Gdif']==0)&(df3['Bdif']==0)]\n Zero=Zero.head(3)\n if Zero.empty==False:\n Zero=Zero.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)\n name=df3['Serial no'].tolist()\n rate=df3['color_y'].tolist()\n col=list(Zero.columns)\n row_text=Zero.iloc[0].tolist()\n df3=df3.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)\n row_df3=df3.iloc[0].tolist()\n row_df32=df3.iloc[1].tolist()\n row_df33=df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n \n else:\n filtdf=df3.loc[(df3['A']>=SumAvr)]\n filtdf=filtdf.sort_values(by=['Rdif','Gdif','Bdif']).head()\n Neg_filtdf=df3.loc[(df3['A']<SumAvr)]\n Neg_filtdf=Neg_filtdf.sort_values(by=['Rdif','Gdif','Bdif']).head()\n \n if Neg_filtdf.empty==True and filtdf.empty ==True:\n root=tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"失敗\", \"未找到符合資料\")\n else:\n filtdf=filtdf.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)\n name=df3['Serial no'].tolist()\n rate=df3['color_y'].tolist()\n col=list(filtdf.columns)\n row_text=filtdf.iloc[0].tolist()\n df3=df3.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)\n row_df3=df3.iloc[0].tolist()\n row_df32=df3.iloc[1].tolist()\n row_df33=df3.iloc[2].tolist()\n Result_Print()\n print(\"最接近的為1\",filtdf.head(1))\n \n\n \n\ndef color_def(BAvr,GAvr,RAvr):\n \n global color\n if abs(int(BAvr)-int(GAvr))<=1 and abs(int(BAvr)-int(RAvr))<=1:\n color='White'\n return color\n \n elif BAvr>=GAvr and BAvr>=RAvr:\n if BAvr-GAvr>3 and BAvr-RAvr>=3:\n color='Blue'\n return color\n \n elif BAvr-GAvr<3:\n color='Cyan'\n return color\n \n else:\n color='Purple'\n return color\n \n \n elif GAvr>=RAvr and GAvr>=BAvr:\n if GAvr-RAvr>3 or GAvr-BAvr>3:\n color='Green'\n return color\n \n elif GAvr-RAvr<3:\n color='Yellow'\n return color\n \n else:\n color='Cyan'\n return color\n \n \n elif RAvr>=GAvr and RAvr>=BAvr:\n if RAvr-GAvr>=3 and RAvr-BAvr>=3:\n color='Red'\n return color\n\n elif RAvr-GAvr<3:\n color='Yellow'\n return color\n\n else:\n color='Purple'\n return color\n \n\n else:\n color='White'\n\n\n#img=cv2.imdecode(np.fromfile(r\"D:\\桌面\\JA Material\\JA-material\\pure\\%s\" % (result),dtype=np.uint8),-1) \nimg=cv2.imdecode(np.fromfile(r\".pure\\%s\" % (result),dtype=np.uint8),-1)\ncv2.namedWindow('mouse_callback')\n\n# bind the callback function to window\n\ncv2.setMouseCallback('mouse_callback',CircleCallback)\n \ndef main():\n while (True):\n cv2.imshow('mouse_callback',img)\n if cv2.waitKey(20) == 27:\n break\n \n cv2.destroyAllWindows()\n \n \nif __name__ == \"__main__\":\n main()\n", "import cv2\nimport numpy as np\nimport pandas as pd\nimport tkinter as tk\nimport random\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkinter import Scale, Tk\nfrom tkinter.ttk import Notebook\nrefPt = []\nPtBGR = []\nr = []\ng = []\nb = []\nrefPt = []\nSerial = []\nPtBGR = []\nr1 = []\nr2 = []\nr3 = []\nr4 = []\nrate = []\nrate2 = []\nrate3 = []\nr6 = []\nr7 = []\nr8 = []\nr9 = []\nadd = []\nadd2 = []\nadd3 = []\ncolor_name = []\nlocate = []\nbrand = []\nboolean = False\nroot = tk.Tk()\nroot.geometry('400x200')\nroot.configure(background='white')\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\ndef getTextInput():\n global result, result2\n result = text.get(1.0, tk.END + '-1c')\n result2 = text2.get(1.0, tk.END + '-1c')\n\n\nimg = PhotoImage(file='buttons/QJsmall.png')\npanel = tk.Label(root, image=img)\npanel.grid(row=0, column=0, columnspan=3)\nlabelmode = tk.Label(root, text=\"\"\"請輸入圖片完整名稱\n ex:104432 w7.jpg\"\"\", bg='white')\nlabelmode.configure(font=('微軟正黑體', 10))\nlabelmode.grid(row=1)\ntext = tk.Text(root, width=20, height=1)\ntext.insert('insert', '.jpg')\ntext.configure(font=('微軟正黑體', 10))\ntext.grid(row=1, column=2)\nlabelmode2 = tk.Label(root, text=\"\"\"請輸入讀取資料庫名稱\n ex:PureColorBig.csv\"\"\", bg=\n 'white')\nlabelmode2.configure(font=('微軟正黑體', 10))\nlabelmode2.grid(row=2)\ntext2 = tk.Text(root, width=20, height=1)\ntext2.insert('insert', 'PureColorBig.csv')\ntext2.configure(font=('微軟正黑體', 10))\ntext2.grid(row=2, column=2)\nimg_confirm = PhotoImage(file='buttons/confirm.png')\nimg_start = PhotoImage(file='buttons/start.png')\nbtnRead = tk.Button(root, image=img_confirm, text=' ', relief='flat',\n command=getTextInput)\nbtnRead.grid(row=5, column=1)\nbtnRead2 = tk.Button(root, image=img_start, text=' ', relief='flat',\n command=quitScreen)\nbtnRead2.grid(row=5, column=2)\nroot.mainloop()\n\n\ndef Result_Print():\n window = Tk()\n window.title('分析結果')\n window.geometry('600x900')\n frame2 = Frame(window)\n frame2.pack(fill='both')\n tablayout = Notebook(frame2)\n tablayout2 = Notebook(frame2)\n ntab1 = Frame(tablayout2)\n ntab1.pack(fill='both')\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=25, height=2, text=name_n[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab1, text='交叉配對結果')\n ntab2 = Frame(tablayout2)\n ntab2.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab2, text='配方1')\n ntab3 = Frame(tablayout2)\n ntab3.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab3, text='配方2')\n ntab4 = Frame(tablayout2)\n ntab4.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab4, text='配方3')\n ntab5 = Frame(tablayout2)\n ntab5.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab5, text='最接近配方')\n tab1 = Frame(tablayout)\n tab1.pack(fill='both')\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=25, height=2, text=name[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=5, height=2, text='%s' % rate[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab1, text='顏色分類結果')\n tab2 = Frame(tablayout)\n tab2.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=row_df3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab2, text='配方1')\n tab3 = Frame(tablayout)\n tab3.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=row_df32[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab3, text='配方2')\n tab4 = Frame(tablayout)\n tab4.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=row_df33[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab4, text='配方3')\n tab5 = Frame(tablayout)\n tab5.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=row_text[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab5, text='最接近配方')\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n\n\ndef CircleCallback(event, x, y, flags, param):\n n = 8\n global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text\n global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n n = 500\n for c in range(0, n):\n c += 1\n ranx = random.randint(0, 499)\n rany = random.randint(0, 499)\n refPt.append((ranx, rany))\n b, g, r = img[ranx, rany]\n PtBGR.append((b, g, r))\n b = [x[0] for x in PtBGR]\n g = [x[1] for x in PtBGR]\n r = [x[2] for x in PtBGR]\n if len(refPt) == n:\n BAvr = round(sum(b[0:n]) / n)\n GAvr = round(sum(g[0:n]) / n)\n RAvr = round(sum(r[0:n]) / n)\n SumRGB = BAvr + GAvr + RAvr\n SumAvr = round(SumRGB / 3)\n color_def(BAvr, GAvr, RAvr)\n color_name.append(color)\n AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,\n 'Avr': SumAvr, 'color': color_name}\n df_test = pd.DataFrame(AvrRGB, index=[0])\n dfread = pd.read_csv('.data base\\\\%s' % result2)\n dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'\n ]) / 3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])\n nfread = dfread[['Serial no', 'R', 'G', 'B']]\n loan = pd.merge(nf, nfread)\n group = loan.groupby('Serial no')\n Newnf = group.count()\n Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)\n Newnf = Newnf.sort_values(by=['R'], ascending=False)\n Rate = Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.\n columns.values]\n nf2 = pd.DataFrame(Newnf.to_records())\n nf2 = nf2.head(5)\n print(nf2)\n if len(nf2['Serial no']) == 0:\n i = 0\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 1:\n i = nf2.at[0, 'Serial no']\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 2:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = 0\n else:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = nf2.at[2, 'Serial no']\n print(k)\n nf3 = dfread.loc[dfread['Serial no'] == i].head(1)\n nf4 = dfread.loc[dfread['Serial no'] == j].head(1)\n nf5 = dfread.loc[dfread['Serial no'] == k].head(1)\n nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf = pd.concat([nf3, nf4, nf5])\n nf.to_csv('.data base\\\\test_result2.csv', index=False,\n encoding='utf_8_sig')\n print(nf)\n ncol = list(nf.columns)\n if len(nf2['Serial no']) == 0:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(nf2['Serial no']) == 1:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n elif len(nf2['Serial no']) == 2:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n else:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33 = nf5.iloc[0].tolist()\n name_n = nf['Serial no'].tolist()\n rate_n = Rate\n \"\"\"\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n \"\"\"\n \"\"\"\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n \"\"\"\n newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'\n ] == SumAvr) | (dfread['S'] == SumRGB)]\n newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))\n newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))\n newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))\n newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))\n df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True\n ).head(100)\n df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])\n )\n df = df.sort_values(by=['dalta'], ascending=True)\n data = df[['Serial no', 'color']]\n group = data.groupby('Serial no')\n datacount = group.count()\n df = df.merge(datacount, left_on='Serial no', right_index=True)\n df = df.sort_values(by=['color_y'], ascending=False)\n df3 = df.drop_duplicates('Serial no', keep='first', inplace\n =False).head()\n print(df3)\n df3.to_csv('.data base\\\\test_result.csv', index=False,\n encoding='utf_8_sig')\n if df3.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(df3) <= 2:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '只找到少數資料\\n 已存在test_result')\n else:\n Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &\n (df3['Bdif'] == 0)]\n Zero = Zero.head(3)\n if Zero.empty == False:\n Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(Zero.columns)\n row_text = Zero.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n else:\n filtdf = df3.loc[df3['A'] >= SumAvr]\n filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']\n ).head()\n Neg_filtdf = df3.loc[df3['A'] < SumAvr]\n Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',\n 'Gdif', 'Bdif']).head()\n if Neg_filtdf.empty == True and filtdf.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n else:\n filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',\n 'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',\n 'Sdif', 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(filtdf.columns)\n row_text = filtdf.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('最接近的為1', filtdf.head(1))\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\nimg = cv2.imdecode(np.fromfile('.pure\\\\%s' % result, dtype=np.uint8), -1)\ncv2.namedWindow('mouse_callback')\ncv2.setMouseCallback('mouse_callback', CircleCallback)\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\nrefPt = []\nPtBGR = []\nr = []\ng = []\nb = []\nrefPt = []\nSerial = []\nPtBGR = []\nr1 = []\nr2 = []\nr3 = []\nr4 = []\nrate = []\nrate2 = []\nrate3 = []\nr6 = []\nr7 = []\nr8 = []\nr9 = []\nadd = []\nadd2 = []\nadd3 = []\ncolor_name = []\nlocate = []\nbrand = []\nboolean = False\nroot = tk.Tk()\nroot.geometry('400x200')\nroot.configure(background='white')\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\ndef getTextInput():\n global result, result2\n result = text.get(1.0, tk.END + '-1c')\n result2 = text2.get(1.0, tk.END + '-1c')\n\n\nimg = PhotoImage(file='buttons/QJsmall.png')\npanel = tk.Label(root, image=img)\npanel.grid(row=0, column=0, columnspan=3)\nlabelmode = tk.Label(root, text=\"\"\"請輸入圖片完整名稱\n ex:104432 w7.jpg\"\"\", bg='white')\nlabelmode.configure(font=('微軟正黑體', 10))\nlabelmode.grid(row=1)\ntext = tk.Text(root, width=20, height=1)\ntext.insert('insert', '.jpg')\ntext.configure(font=('微軟正黑體', 10))\ntext.grid(row=1, column=2)\nlabelmode2 = tk.Label(root, text=\"\"\"請輸入讀取資料庫名稱\n ex:PureColorBig.csv\"\"\", bg=\n 'white')\nlabelmode2.configure(font=('微軟正黑體', 10))\nlabelmode2.grid(row=2)\ntext2 = tk.Text(root, width=20, height=1)\ntext2.insert('insert', 'PureColorBig.csv')\ntext2.configure(font=('微軟正黑體', 10))\ntext2.grid(row=2, column=2)\nimg_confirm = PhotoImage(file='buttons/confirm.png')\nimg_start = PhotoImage(file='buttons/start.png')\nbtnRead = tk.Button(root, image=img_confirm, text=' ', relief='flat',\n command=getTextInput)\nbtnRead.grid(row=5, column=1)\nbtnRead2 = tk.Button(root, image=img_start, text=' ', relief='flat',\n command=quitScreen)\nbtnRead2.grid(row=5, column=2)\nroot.mainloop()\n\n\ndef Result_Print():\n window = Tk()\n window.title('分析結果')\n window.geometry('600x900')\n frame2 = Frame(window)\n frame2.pack(fill='both')\n tablayout = Notebook(frame2)\n tablayout2 = Notebook(frame2)\n ntab1 = Frame(tablayout2)\n ntab1.pack(fill='both')\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=25, height=2, text=name_n[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab1, text='交叉配對結果')\n ntab2 = Frame(tablayout2)\n ntab2.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab2, text='配方1')\n ntab3 = Frame(tablayout2)\n ntab3.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab3, text='配方2')\n ntab4 = Frame(tablayout2)\n ntab4.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab4, text='配方3')\n ntab5 = Frame(tablayout2)\n ntab5.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab5, text='最接近配方')\n tab1 = Frame(tablayout)\n tab1.pack(fill='both')\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=25, height=2, text=name[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=5, height=2, text='%s' % rate[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab1, text='顏色分類結果')\n tab2 = Frame(tablayout)\n tab2.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=row_df3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab2, text='配方1')\n tab3 = Frame(tablayout)\n tab3.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=row_df32[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab3, text='配方2')\n tab4 = Frame(tablayout)\n tab4.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=row_df33[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab4, text='配方3')\n tab5 = Frame(tablayout)\n tab5.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=row_text[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab5, text='最接近配方')\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n\n\ndef CircleCallback(event, x, y, flags, param):\n n = 8\n global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text\n global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n n = 500\n for c in range(0, n):\n c += 1\n ranx = random.randint(0, 499)\n rany = random.randint(0, 499)\n refPt.append((ranx, rany))\n b, g, r = img[ranx, rany]\n PtBGR.append((b, g, r))\n b = [x[0] for x in PtBGR]\n g = [x[1] for x in PtBGR]\n r = [x[2] for x in PtBGR]\n if len(refPt) == n:\n BAvr = round(sum(b[0:n]) / n)\n GAvr = round(sum(g[0:n]) / n)\n RAvr = round(sum(r[0:n]) / n)\n SumRGB = BAvr + GAvr + RAvr\n SumAvr = round(SumRGB / 3)\n color_def(BAvr, GAvr, RAvr)\n color_name.append(color)\n AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,\n 'Avr': SumAvr, 'color': color_name}\n df_test = pd.DataFrame(AvrRGB, index=[0])\n dfread = pd.read_csv('.data base\\\\%s' % result2)\n dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'\n ]) / 3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])\n nfread = dfread[['Serial no', 'R', 'G', 'B']]\n loan = pd.merge(nf, nfread)\n group = loan.groupby('Serial no')\n Newnf = group.count()\n Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)\n Newnf = Newnf.sort_values(by=['R'], ascending=False)\n Rate = Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.\n columns.values]\n nf2 = pd.DataFrame(Newnf.to_records())\n nf2 = nf2.head(5)\n print(nf2)\n if len(nf2['Serial no']) == 0:\n i = 0\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 1:\n i = nf2.at[0, 'Serial no']\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 2:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = 0\n else:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = nf2.at[2, 'Serial no']\n print(k)\n nf3 = dfread.loc[dfread['Serial no'] == i].head(1)\n nf4 = dfread.loc[dfread['Serial no'] == j].head(1)\n nf5 = dfread.loc[dfread['Serial no'] == k].head(1)\n nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf = pd.concat([nf3, nf4, nf5])\n nf.to_csv('.data base\\\\test_result2.csv', index=False,\n encoding='utf_8_sig')\n print(nf)\n ncol = list(nf.columns)\n if len(nf2['Serial no']) == 0:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(nf2['Serial no']) == 1:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n elif len(nf2['Serial no']) == 2:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n else:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33 = nf5.iloc[0].tolist()\n name_n = nf['Serial no'].tolist()\n rate_n = Rate\n \"\"\"\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n \"\"\"\n \"\"\"\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n \"\"\"\n newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'\n ] == SumAvr) | (dfread['S'] == SumRGB)]\n newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))\n newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))\n newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))\n newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))\n df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True\n ).head(100)\n df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])\n )\n df = df.sort_values(by=['dalta'], ascending=True)\n data = df[['Serial no', 'color']]\n group = data.groupby('Serial no')\n datacount = group.count()\n df = df.merge(datacount, left_on='Serial no', right_index=True)\n df = df.sort_values(by=['color_y'], ascending=False)\n df3 = df.drop_duplicates('Serial no', keep='first', inplace\n =False).head()\n print(df3)\n df3.to_csv('.data base\\\\test_result.csv', index=False,\n encoding='utf_8_sig')\n if df3.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(df3) <= 2:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '只找到少數資料\\n 已存在test_result')\n else:\n Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &\n (df3['Bdif'] == 0)]\n Zero = Zero.head(3)\n if Zero.empty == False:\n Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(Zero.columns)\n row_text = Zero.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n else:\n filtdf = df3.loc[df3['A'] >= SumAvr]\n filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']\n ).head()\n Neg_filtdf = df3.loc[df3['A'] < SumAvr]\n Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',\n 'Gdif', 'Bdif']).head()\n if Neg_filtdf.empty == True and filtdf.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n else:\n filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',\n 'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',\n 'Sdif', 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(filtdf.columns)\n row_text = filtdf.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('最接近的為1', filtdf.head(1))\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\nimg = cv2.imdecode(np.fromfile('.pure\\\\%s' % result, dtype=np.uint8), -1)\ncv2.namedWindow('mouse_callback')\ncv2.setMouseCallback('mouse_callback', CircleCallback)\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\n<assignment token>\nroot.geometry('400x200')\nroot.configure(background='white')\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\ndef getTextInput():\n global result, result2\n result = text.get(1.0, tk.END + '-1c')\n result2 = text2.get(1.0, tk.END + '-1c')\n\n\n<assignment token>\npanel.grid(row=0, column=0, columnspan=3)\n<assignment token>\nlabelmode.configure(font=('微軟正黑體', 10))\nlabelmode.grid(row=1)\n<assignment token>\ntext.insert('insert', '.jpg')\ntext.configure(font=('微軟正黑體', 10))\ntext.grid(row=1, column=2)\n<assignment token>\nlabelmode2.configure(font=('微軟正黑體', 10))\nlabelmode2.grid(row=2)\n<assignment token>\ntext2.insert('insert', 'PureColorBig.csv')\ntext2.configure(font=('微軟正黑體', 10))\ntext2.grid(row=2, column=2)\n<assignment token>\nbtnRead.grid(row=5, column=1)\n<assignment token>\nbtnRead2.grid(row=5, column=2)\nroot.mainloop()\n\n\ndef Result_Print():\n window = Tk()\n window.title('分析結果')\n window.geometry('600x900')\n frame2 = Frame(window)\n frame2.pack(fill='both')\n tablayout = Notebook(frame2)\n tablayout2 = Notebook(frame2)\n ntab1 = Frame(tablayout2)\n ntab1.pack(fill='both')\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=25, height=2, text=name_n[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab1, text='交叉配對結果')\n ntab2 = Frame(tablayout2)\n ntab2.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab2, text='配方1')\n ntab3 = Frame(tablayout2)\n ntab3.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab3, text='配方2')\n ntab4 = Frame(tablayout2)\n ntab4.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab4, text='配方3')\n ntab5 = Frame(tablayout2)\n ntab5.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab5, text='最接近配方')\n tab1 = Frame(tablayout)\n tab1.pack(fill='both')\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=25, height=2, text=name[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=5, height=2, text='%s' % rate[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab1, text='顏色分類結果')\n tab2 = Frame(tablayout)\n tab2.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=row_df3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab2, text='配方1')\n tab3 = Frame(tablayout)\n tab3.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=row_df32[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab3, text='配方2')\n tab4 = Frame(tablayout)\n tab4.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=row_df33[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab4, text='配方3')\n tab5 = Frame(tablayout)\n tab5.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=row_text[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab5, text='最接近配方')\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n\n\ndef CircleCallback(event, x, y, flags, param):\n n = 8\n global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text\n global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n n = 500\n for c in range(0, n):\n c += 1\n ranx = random.randint(0, 499)\n rany = random.randint(0, 499)\n refPt.append((ranx, rany))\n b, g, r = img[ranx, rany]\n PtBGR.append((b, g, r))\n b = [x[0] for x in PtBGR]\n g = [x[1] for x in PtBGR]\n r = [x[2] for x in PtBGR]\n if len(refPt) == n:\n BAvr = round(sum(b[0:n]) / n)\n GAvr = round(sum(g[0:n]) / n)\n RAvr = round(sum(r[0:n]) / n)\n SumRGB = BAvr + GAvr + RAvr\n SumAvr = round(SumRGB / 3)\n color_def(BAvr, GAvr, RAvr)\n color_name.append(color)\n AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,\n 'Avr': SumAvr, 'color': color_name}\n df_test = pd.DataFrame(AvrRGB, index=[0])\n dfread = pd.read_csv('.data base\\\\%s' % result2)\n dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'\n ]) / 3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])\n nfread = dfread[['Serial no', 'R', 'G', 'B']]\n loan = pd.merge(nf, nfread)\n group = loan.groupby('Serial no')\n Newnf = group.count()\n Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)\n Newnf = Newnf.sort_values(by=['R'], ascending=False)\n Rate = Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.\n columns.values]\n nf2 = pd.DataFrame(Newnf.to_records())\n nf2 = nf2.head(5)\n print(nf2)\n if len(nf2['Serial no']) == 0:\n i = 0\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 1:\n i = nf2.at[0, 'Serial no']\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 2:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = 0\n else:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = nf2.at[2, 'Serial no']\n print(k)\n nf3 = dfread.loc[dfread['Serial no'] == i].head(1)\n nf4 = dfread.loc[dfread['Serial no'] == j].head(1)\n nf5 = dfread.loc[dfread['Serial no'] == k].head(1)\n nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf = pd.concat([nf3, nf4, nf5])\n nf.to_csv('.data base\\\\test_result2.csv', index=False,\n encoding='utf_8_sig')\n print(nf)\n ncol = list(nf.columns)\n if len(nf2['Serial no']) == 0:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(nf2['Serial no']) == 1:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n elif len(nf2['Serial no']) == 2:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n else:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33 = nf5.iloc[0].tolist()\n name_n = nf['Serial no'].tolist()\n rate_n = Rate\n \"\"\"\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n \"\"\"\n \"\"\"\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n \"\"\"\n newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'\n ] == SumAvr) | (dfread['S'] == SumRGB)]\n newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))\n newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))\n newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))\n newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))\n df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True\n ).head(100)\n df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])\n )\n df = df.sort_values(by=['dalta'], ascending=True)\n data = df[['Serial no', 'color']]\n group = data.groupby('Serial no')\n datacount = group.count()\n df = df.merge(datacount, left_on='Serial no', right_index=True)\n df = df.sort_values(by=['color_y'], ascending=False)\n df3 = df.drop_duplicates('Serial no', keep='first', inplace\n =False).head()\n print(df3)\n df3.to_csv('.data base\\\\test_result.csv', index=False,\n encoding='utf_8_sig')\n if df3.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(df3) <= 2:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '只找到少數資料\\n 已存在test_result')\n else:\n Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &\n (df3['Bdif'] == 0)]\n Zero = Zero.head(3)\n if Zero.empty == False:\n Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(Zero.columns)\n row_text = Zero.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n else:\n filtdf = df3.loc[df3['A'] >= SumAvr]\n filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']\n ).head()\n Neg_filtdf = df3.loc[df3['A'] < SumAvr]\n Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',\n 'Gdif', 'Bdif']).head()\n if Neg_filtdf.empty == True and filtdf.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n else:\n filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',\n 'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',\n 'Sdif', 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(filtdf.columns)\n row_text = filtdf.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('最接近的為1', filtdf.head(1))\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\n<assignment token>\ncv2.namedWindow('mouse_callback')\ncv2.setMouseCallback('mouse_callback', CircleCallback)\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\n<assignment token>\n<code token>\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\ndef getTextInput():\n global result, result2\n result = text.get(1.0, tk.END + '-1c')\n result2 = text2.get(1.0, tk.END + '-1c')\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef Result_Print():\n window = Tk()\n window.title('分析結果')\n window.geometry('600x900')\n frame2 = Frame(window)\n frame2.pack(fill='both')\n tablayout = Notebook(frame2)\n tablayout2 = Notebook(frame2)\n ntab1 = Frame(tablayout2)\n ntab1.pack(fill='both')\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=25, height=2, text=name_n[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab1, text='交叉配對結果')\n ntab2 = Frame(tablayout2)\n ntab2.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab2, text='配方1')\n ntab3 = Frame(tablayout2)\n ntab3.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab3, text='配方2')\n ntab4 = Frame(tablayout2)\n ntab4.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab4, text='配方3')\n ntab5 = Frame(tablayout2)\n ntab5.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab5, text='最接近配方')\n tab1 = Frame(tablayout)\n tab1.pack(fill='both')\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=25, height=2, text=name[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=5, height=2, text='%s' % rate[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab1, text='顏色分類結果')\n tab2 = Frame(tablayout)\n tab2.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=row_df3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab2, text='配方1')\n tab3 = Frame(tablayout)\n tab3.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=row_df32[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab3, text='配方2')\n tab4 = Frame(tablayout)\n tab4.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=row_df33[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab4, text='配方3')\n tab5 = Frame(tablayout)\n tab5.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=row_text[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab5, text='最接近配方')\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n\n\ndef CircleCallback(event, x, y, flags, param):\n n = 8\n global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text\n global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n n = 500\n for c in range(0, n):\n c += 1\n ranx = random.randint(0, 499)\n rany = random.randint(0, 499)\n refPt.append((ranx, rany))\n b, g, r = img[ranx, rany]\n PtBGR.append((b, g, r))\n b = [x[0] for x in PtBGR]\n g = [x[1] for x in PtBGR]\n r = [x[2] for x in PtBGR]\n if len(refPt) == n:\n BAvr = round(sum(b[0:n]) / n)\n GAvr = round(sum(g[0:n]) / n)\n RAvr = round(sum(r[0:n]) / n)\n SumRGB = BAvr + GAvr + RAvr\n SumAvr = round(SumRGB / 3)\n color_def(BAvr, GAvr, RAvr)\n color_name.append(color)\n AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,\n 'Avr': SumAvr, 'color': color_name}\n df_test = pd.DataFrame(AvrRGB, index=[0])\n dfread = pd.read_csv('.data base\\\\%s' % result2)\n dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'\n ]) / 3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])\n nfread = dfread[['Serial no', 'R', 'G', 'B']]\n loan = pd.merge(nf, nfread)\n group = loan.groupby('Serial no')\n Newnf = group.count()\n Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)\n Newnf = Newnf.sort_values(by=['R'], ascending=False)\n Rate = Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.\n columns.values]\n nf2 = pd.DataFrame(Newnf.to_records())\n nf2 = nf2.head(5)\n print(nf2)\n if len(nf2['Serial no']) == 0:\n i = 0\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 1:\n i = nf2.at[0, 'Serial no']\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 2:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = 0\n else:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = nf2.at[2, 'Serial no']\n print(k)\n nf3 = dfread.loc[dfread['Serial no'] == i].head(1)\n nf4 = dfread.loc[dfread['Serial no'] == j].head(1)\n nf5 = dfread.loc[dfread['Serial no'] == k].head(1)\n nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf = pd.concat([nf3, nf4, nf5])\n nf.to_csv('.data base\\\\test_result2.csv', index=False,\n encoding='utf_8_sig')\n print(nf)\n ncol = list(nf.columns)\n if len(nf2['Serial no']) == 0:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(nf2['Serial no']) == 1:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n elif len(nf2['Serial no']) == 2:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n else:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33 = nf5.iloc[0].tolist()\n name_n = nf['Serial no'].tolist()\n rate_n = Rate\n \"\"\"\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n \"\"\"\n \"\"\"\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n \"\"\"\n newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'\n ] == SumAvr) | (dfread['S'] == SumRGB)]\n newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))\n newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))\n newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))\n newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))\n df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True\n ).head(100)\n df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])\n )\n df = df.sort_values(by=['dalta'], ascending=True)\n data = df[['Serial no', 'color']]\n group = data.groupby('Serial no')\n datacount = group.count()\n df = df.merge(datacount, left_on='Serial no', right_index=True)\n df = df.sort_values(by=['color_y'], ascending=False)\n df3 = df.drop_duplicates('Serial no', keep='first', inplace\n =False).head()\n print(df3)\n df3.to_csv('.data base\\\\test_result.csv', index=False,\n encoding='utf_8_sig')\n if df3.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(df3) <= 2:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '只找到少數資料\\n 已存在test_result')\n else:\n Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &\n (df3['Bdif'] == 0)]\n Zero = Zero.head(3)\n if Zero.empty == False:\n Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(Zero.columns)\n row_text = Zero.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n else:\n filtdf = df3.loc[df3['A'] >= SumAvr]\n filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']\n ).head()\n Neg_filtdf = df3.loc[df3['A'] < SumAvr]\n Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',\n 'Gdif', 'Bdif']).head()\n if Neg_filtdf.empty == True and filtdf.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n else:\n filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',\n 'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',\n 'Sdif', 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(filtdf.columns)\n row_text = filtdf.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('最接近的為1', filtdf.head(1))\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\n<assignment token>\n<code token>\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef Result_Print():\n window = Tk()\n window.title('分析結果')\n window.geometry('600x900')\n frame2 = Frame(window)\n frame2.pack(fill='both')\n tablayout = Notebook(frame2)\n tablayout2 = Notebook(frame2)\n ntab1 = Frame(tablayout2)\n ntab1.pack(fill='both')\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=25, height=2, text=name_n[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab1, text='交叉配對結果')\n ntab2 = Frame(tablayout2)\n ntab2.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab2, text='配方1')\n ntab3 = Frame(tablayout2)\n ntab3.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab3, text='配方2')\n ntab4 = Frame(tablayout2)\n ntab4.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab4, text='配方3')\n ntab5 = Frame(tablayout2)\n ntab5.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab5, text='最接近配方')\n tab1 = Frame(tablayout)\n tab1.pack(fill='both')\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=25, height=2, text=name[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=5, height=2, text='%s' % rate[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab1, text='顏色分類結果')\n tab2 = Frame(tablayout)\n tab2.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=row_df3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab2, text='配方1')\n tab3 = Frame(tablayout)\n tab3.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=row_df32[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab3, text='配方2')\n tab4 = Frame(tablayout)\n tab4.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=row_df33[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab4, text='配方3')\n tab5 = Frame(tablayout)\n tab5.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=row_text[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab5, text='最接近配方')\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n\n\ndef CircleCallback(event, x, y, flags, param):\n n = 8\n global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text\n global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n n = 500\n for c in range(0, n):\n c += 1\n ranx = random.randint(0, 499)\n rany = random.randint(0, 499)\n refPt.append((ranx, rany))\n b, g, r = img[ranx, rany]\n PtBGR.append((b, g, r))\n b = [x[0] for x in PtBGR]\n g = [x[1] for x in PtBGR]\n r = [x[2] for x in PtBGR]\n if len(refPt) == n:\n BAvr = round(sum(b[0:n]) / n)\n GAvr = round(sum(g[0:n]) / n)\n RAvr = round(sum(r[0:n]) / n)\n SumRGB = BAvr + GAvr + RAvr\n SumAvr = round(SumRGB / 3)\n color_def(BAvr, GAvr, RAvr)\n color_name.append(color)\n AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,\n 'Avr': SumAvr, 'color': color_name}\n df_test = pd.DataFrame(AvrRGB, index=[0])\n dfread = pd.read_csv('.data base\\\\%s' % result2)\n dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'\n ]) / 3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])\n nfread = dfread[['Serial no', 'R', 'G', 'B']]\n loan = pd.merge(nf, nfread)\n group = loan.groupby('Serial no')\n Newnf = group.count()\n Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)\n Newnf = Newnf.sort_values(by=['R'], ascending=False)\n Rate = Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.\n columns.values]\n nf2 = pd.DataFrame(Newnf.to_records())\n nf2 = nf2.head(5)\n print(nf2)\n if len(nf2['Serial no']) == 0:\n i = 0\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 1:\n i = nf2.at[0, 'Serial no']\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 2:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = 0\n else:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = nf2.at[2, 'Serial no']\n print(k)\n nf3 = dfread.loc[dfread['Serial no'] == i].head(1)\n nf4 = dfread.loc[dfread['Serial no'] == j].head(1)\n nf5 = dfread.loc[dfread['Serial no'] == k].head(1)\n nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf = pd.concat([nf3, nf4, nf5])\n nf.to_csv('.data base\\\\test_result2.csv', index=False,\n encoding='utf_8_sig')\n print(nf)\n ncol = list(nf.columns)\n if len(nf2['Serial no']) == 0:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(nf2['Serial no']) == 1:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n elif len(nf2['Serial no']) == 2:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n else:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33 = nf5.iloc[0].tolist()\n name_n = nf['Serial no'].tolist()\n rate_n = Rate\n \"\"\"\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n \"\"\"\n \"\"\"\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n \"\"\"\n newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'\n ] == SumAvr) | (dfread['S'] == SumRGB)]\n newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))\n newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))\n newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))\n newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))\n df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True\n ).head(100)\n df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])\n )\n df = df.sort_values(by=['dalta'], ascending=True)\n data = df[['Serial no', 'color']]\n group = data.groupby('Serial no')\n datacount = group.count()\n df = df.merge(datacount, left_on='Serial no', right_index=True)\n df = df.sort_values(by=['color_y'], ascending=False)\n df3 = df.drop_duplicates('Serial no', keep='first', inplace\n =False).head()\n print(df3)\n df3.to_csv('.data base\\\\test_result.csv', index=False,\n encoding='utf_8_sig')\n if df3.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(df3) <= 2:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '只找到少數資料\\n 已存在test_result')\n else:\n Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &\n (df3['Bdif'] == 0)]\n Zero = Zero.head(3)\n if Zero.empty == False:\n Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(Zero.columns)\n row_text = Zero.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n else:\n filtdf = df3.loc[df3['A'] >= SumAvr]\n filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']\n ).head()\n Neg_filtdf = df3.loc[df3['A'] < SumAvr]\n Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',\n 'Gdif', 'Bdif']).head()\n if Neg_filtdf.empty == True and filtdf.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n else:\n filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',\n 'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',\n 'Sdif', 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(filtdf.columns)\n row_text = filtdf.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('最接近的為1', filtdf.head(1))\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\n<assignment token>\n<code token>\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef CircleCallback(event, x, y, flags, param):\n n = 8\n global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text\n global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n n = 500\n for c in range(0, n):\n c += 1\n ranx = random.randint(0, 499)\n rany = random.randint(0, 499)\n refPt.append((ranx, rany))\n b, g, r = img[ranx, rany]\n PtBGR.append((b, g, r))\n b = [x[0] for x in PtBGR]\n g = [x[1] for x in PtBGR]\n r = [x[2] for x in PtBGR]\n if len(refPt) == n:\n BAvr = round(sum(b[0:n]) / n)\n GAvr = round(sum(g[0:n]) / n)\n RAvr = round(sum(r[0:n]) / n)\n SumRGB = BAvr + GAvr + RAvr\n SumAvr = round(SumRGB / 3)\n color_def(BAvr, GAvr, RAvr)\n color_name.append(color)\n AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,\n 'Avr': SumAvr, 'color': color_name}\n df_test = pd.DataFrame(AvrRGB, index=[0])\n dfread = pd.read_csv('.data base\\\\%s' % result2)\n dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'\n ]) / 3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])\n nfread = dfread[['Serial no', 'R', 'G', 'B']]\n loan = pd.merge(nf, nfread)\n group = loan.groupby('Serial no')\n Newnf = group.count()\n Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)\n Newnf = Newnf.sort_values(by=['R'], ascending=False)\n Rate = Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.\n columns.values]\n nf2 = pd.DataFrame(Newnf.to_records())\n nf2 = nf2.head(5)\n print(nf2)\n if len(nf2['Serial no']) == 0:\n i = 0\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 1:\n i = nf2.at[0, 'Serial no']\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 2:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = 0\n else:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = nf2.at[2, 'Serial no']\n print(k)\n nf3 = dfread.loc[dfread['Serial no'] == i].head(1)\n nf4 = dfread.loc[dfread['Serial no'] == j].head(1)\n nf5 = dfread.loc[dfread['Serial no'] == k].head(1)\n nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf = pd.concat([nf3, nf4, nf5])\n nf.to_csv('.data base\\\\test_result2.csv', index=False,\n encoding='utf_8_sig')\n print(nf)\n ncol = list(nf.columns)\n if len(nf2['Serial no']) == 0:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(nf2['Serial no']) == 1:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n elif len(nf2['Serial no']) == 2:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n else:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33 = nf5.iloc[0].tolist()\n name_n = nf['Serial no'].tolist()\n rate_n = Rate\n \"\"\"\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n \"\"\"\n \"\"\"\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n \"\"\"\n newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'\n ] == SumAvr) | (dfread['S'] == SumRGB)]\n newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))\n newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))\n newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))\n newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))\n df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True\n ).head(100)\n df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])\n )\n df = df.sort_values(by=['dalta'], ascending=True)\n data = df[['Serial no', 'color']]\n group = data.groupby('Serial no')\n datacount = group.count()\n df = df.merge(datacount, left_on='Serial no', right_index=True)\n df = df.sort_values(by=['color_y'], ascending=False)\n df3 = df.drop_duplicates('Serial no', keep='first', inplace\n =False).head()\n print(df3)\n df3.to_csv('.data base\\\\test_result.csv', index=False,\n encoding='utf_8_sig')\n if df3.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(df3) <= 2:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '只找到少數資料\\n 已存在test_result')\n else:\n Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &\n (df3['Bdif'] == 0)]\n Zero = Zero.head(3)\n if Zero.empty == False:\n Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(Zero.columns)\n row_text = Zero.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n else:\n filtdf = df3.loc[df3['A'] >= SumAvr]\n filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']\n ).head()\n Neg_filtdf = df3.loc[df3['A'] < SumAvr]\n Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',\n 'Gdif', 'Bdif']).head()\n if Neg_filtdf.empty == True and filtdf.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n else:\n filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',\n 'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',\n 'Sdif', 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(filtdf.columns)\n row_text = filtdf.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('最接近的為1', filtdf.head(1))\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\n<assignment token>\n<code token>\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\n<assignment token>\n<code token>\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n" ]
false
313
dca36de5556b120b8b93eac0ad7b971ad735d907
import numpy as np def GradientDescent(f, gradf, x0, epsilon, num_iter, line_search, disp=False, callback=None, **kwargs): x = x0.copy() iteration = 0 opt_arg = {"f": f, "grad_f": gradf} for key in kwargs: opt_arg[key] = kwargs[key] while True: gradient = -gradf(x) alpha = line_search(x, gradient, **opt_arg) x = x + alpha * gradient if callback is not None: callback(x) iteration += 1 if disp: print("Current function val =", f(x)) print("Current gradient norm = ", np.linalg.norm(gradf(x))) if np.linalg.norm(gradf(x)) < epsilon: break if iteration >= num_iter: break res = {"x": x, "num_iter": iteration, "tol": np.linalg.norm(gradf(x))} return res def backtracking(x, descent_dir, **kwargs): f = kwargs["f"] grad_f = kwargs["grad_f"] if kwargs["method"] == "Armijo": beta1 = kwargs["beta1"] rho = kwargs["rho"] alpha = 1 while f(x + alpha * descent_dir) >= f(x) + beta1 * alpha * grad_f(x).dot(descent_dir) or np.isnan(f(x + alpha * descent_dir)): alpha *= rho return alpha
[ "import numpy as np\n\ndef GradientDescent(f, gradf, x0, epsilon, num_iter, line_search, \n disp=False, callback=None, **kwargs):\n\tx = x0.copy()\n\titeration = 0\n\topt_arg = {\"f\": f, \"grad_f\": gradf}\n\tfor key in kwargs:\n\t\topt_arg[key] = kwargs[key]\n\twhile True:\n\t\tgradient = -gradf(x)\n\t\talpha = line_search(x, gradient, **opt_arg)\n\t\tx = x + alpha * gradient\n\t\tif callback is not None:\n\t\t\tcallback(x)\n\t\titeration += 1\n\t\tif disp:\n\t\t\tprint(\"Current function val =\", f(x))\n\t\t\tprint(\"Current gradient norm = \", np.linalg.norm(gradf(x)))\n\t\tif np.linalg.norm(gradf(x)) < epsilon:\n\t\t\tbreak\n\t\tif iteration >= num_iter:\n\t\t\tbreak\n\tres = {\"x\": x, \"num_iter\": iteration, \"tol\": np.linalg.norm(gradf(x))}\n\treturn res\n\ndef backtracking(x, descent_dir, **kwargs):\n f = kwargs[\"f\"]\n grad_f = kwargs[\"grad_f\"] \n if kwargs[\"method\"] == \"Armijo\":\n beta1 = kwargs[\"beta1\"]\n rho = kwargs[\"rho\"]\n alpha = 1\n while f(x + alpha * descent_dir) >= f(x) + beta1 * alpha * grad_f(x).dot(descent_dir) or np.isnan(f(x + alpha * descent_dir)):\n alpha *= rho\n return alpha\n", "import numpy as np\n\n\ndef GradientDescent(f, gradf, x0, epsilon, num_iter, line_search, disp=\n False, callback=None, **kwargs):\n x = x0.copy()\n iteration = 0\n opt_arg = {'f': f, 'grad_f': gradf}\n for key in kwargs:\n opt_arg[key] = kwargs[key]\n while True:\n gradient = -gradf(x)\n alpha = line_search(x, gradient, **opt_arg)\n x = x + alpha * gradient\n if callback is not None:\n callback(x)\n iteration += 1\n if disp:\n print('Current function val =', f(x))\n print('Current gradient norm = ', np.linalg.norm(gradf(x)))\n if np.linalg.norm(gradf(x)) < epsilon:\n break\n if iteration >= num_iter:\n break\n res = {'x': x, 'num_iter': iteration, 'tol': np.linalg.norm(gradf(x))}\n return res\n\n\ndef backtracking(x, descent_dir, **kwargs):\n f = kwargs['f']\n grad_f = kwargs['grad_f']\n if kwargs['method'] == 'Armijo':\n beta1 = kwargs['beta1']\n rho = kwargs['rho']\n alpha = 1\n while f(x + alpha * descent_dir) >= f(x) + beta1 * alpha * grad_f(x\n ).dot(descent_dir) or np.isnan(f(x + alpha * descent_dir)):\n alpha *= rho\n return alpha\n", "<import token>\n\n\ndef GradientDescent(f, gradf, x0, epsilon, num_iter, line_search, disp=\n False, callback=None, **kwargs):\n x = x0.copy()\n iteration = 0\n opt_arg = {'f': f, 'grad_f': gradf}\n for key in kwargs:\n opt_arg[key] = kwargs[key]\n while True:\n gradient = -gradf(x)\n alpha = line_search(x, gradient, **opt_arg)\n x = x + alpha * gradient\n if callback is not None:\n callback(x)\n iteration += 1\n if disp:\n print('Current function val =', f(x))\n print('Current gradient norm = ', np.linalg.norm(gradf(x)))\n if np.linalg.norm(gradf(x)) < epsilon:\n break\n if iteration >= num_iter:\n break\n res = {'x': x, 'num_iter': iteration, 'tol': np.linalg.norm(gradf(x))}\n return res\n\n\ndef backtracking(x, descent_dir, **kwargs):\n f = kwargs['f']\n grad_f = kwargs['grad_f']\n if kwargs['method'] == 'Armijo':\n beta1 = kwargs['beta1']\n rho = kwargs['rho']\n alpha = 1\n while f(x + alpha * descent_dir) >= f(x) + beta1 * alpha * grad_f(x\n ).dot(descent_dir) or np.isnan(f(x + alpha * descent_dir)):\n alpha *= rho\n return alpha\n", "<import token>\n<function token>\n\n\ndef backtracking(x, descent_dir, **kwargs):\n f = kwargs['f']\n grad_f = kwargs['grad_f']\n if kwargs['method'] == 'Armijo':\n beta1 = kwargs['beta1']\n rho = kwargs['rho']\n alpha = 1\n while f(x + alpha * descent_dir) >= f(x) + beta1 * alpha * grad_f(x\n ).dot(descent_dir) or np.isnan(f(x + alpha * descent_dir)):\n alpha *= rho\n return alpha\n", "<import token>\n<function token>\n<function token>\n" ]
false
314
4711adcc7c95993ec13b9d06fa674aa064f79bfd
import torch import torch.nn as nn import torch.nn.functional as F class Net(torch.nn.Module): def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device=None): super(Net, self).__init__() self.device = device if dropout_prob is not None and dropout_prob > 0.5: print("Are you sure dropout_prob is supposed to be greater than 0.5?") # Load Roberta self.roberta = torch.hub.load( "pytorch/fairseq", "roberta.base", pretrained=True ) for param in self.roberta.parameters(): param.requires_grad = False self.roberta.eval() # Load ResNet resnet_full = torch.hub.load( "pytorch/vision:v0.6.0", "resnet18", pretrained=True ) self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1]) # for param in self.resnet.parameters(): # param.requires_grad = False # self.resnet.eval() # self.lstm = nn.LSTM(input_size=768, hidden_size=768 * 2) # self.lstm.eval() # Layers self.bns = nn.ModuleList() self.fcs = nn.ModuleList() self.drops = None if dropout_prob is None else nn.ModuleList() prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2 for i, size in enumerate(layer_sizes): self.bns.append(nn.BatchNorm1d(prev_size)) self.fcs.append(nn.Linear(prev_size, size)) if dropout_prob is not None: self.drops.append(nn.Dropout(p=dropout_prob)) prev_size = size def forward(self, inputs): first_images = inputs["image1"].to(self.device) first_text = inputs["text1"] first_length = inputs["length1"].to(self.device) first_categories = inputs["categories1"].to(self.device) first_days_posted = inputs["days_posted1"].to(self.device) second_images = inputs["image2"].to(self.device) second_text = inputs["text2"] second_length = inputs["length2"].to(self.device) second_categories = inputs["categories2"].to(self.device) second_days_posted = inputs["days_posted2"].to(self.device) # Resnet image_tensor_one = self.resnet.forward(first_images) image_tensor_two = self.resnet.forward(second_images) # Roberta text_features1 = torch.Tensor() text_features2 = torch.Tensor() text_features1 = text_features1.to(self.device) text_features2 = text_features2.to(self.device) for text in first_text: first_tokens = self.roberta.encode(text)[:512] features = self.roberta.extract_features(first_tokens) feature_means = torch.mean(features, dim=1) # features = torch.reshape(features, (-1, 1,768)) # output, (hn, cn) = self.lstm(features) # cn = torch.reshape(cn, (1, 768 * 2)) text_features1 = torch.cat([text_features1, feature_means]) for text in second_text: second_tokens = self.roberta.encode(text)[:512] features = self.roberta.extract_features(second_tokens) # print("DIMENSION OF FEATURES ", features.shape) feature_means = torch.mean(features, dim=1) # features = torch.reshape(features, (-1, 1,768)) # output, (hn, cn) = self.lstm(features) # cn = torch.reshape(cn, (1, 768 * 2)) # print("DIMENSION OF FEATURES ", features.shape) text_features2 = torch.cat([text_features2, feature_means]) # Concatenated tensor concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1) concat_tensor = torch.squeeze(concat_tensor) concat_tensor = torch.cat((text_features1, text_features2, concat_tensor), 1) additional_features = torch.cat( [ torch.reshape(first_length, (-1, 1)), torch.reshape(second_length, (-1, 1)), torch.reshape(first_days_posted, (-1, 1)), torch.reshape(second_days_posted, (-1, 1)), ], dim=1, ) concat_tensor = torch.cat( [ concat_tensor, additional_features.float(), first_categories.float(), second_categories.float(), ], dim=1, ) x = concat_tensor zipped_layers = ( zip(self.bns, self.fcs, [None] * len(self.bns)) if self.drops is None else zip(self.bns, self.fcs, self.drops) ) for i, (bn, fc, drop) in enumerate(zipped_layers): x = bn(x) if drop is not None: x = drop(x) if i == len(self.bns) - 1: x = fc(x) else: x = F.relu(fc(x)) return x
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(torch.nn.Module):\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device=None):\n super(Net, self).__init__()\n self.device = device\n\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\"Are you sure dropout_prob is supposed to be greater than 0.5?\")\n\n # Load Roberta\n self.roberta = torch.hub.load(\n \"pytorch/fairseq\", \"roberta.base\", pretrained=True\n )\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n\n # Load ResNet\n resnet_full = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True\n )\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n # for param in self.resnet.parameters():\n # param.requires_grad = False\n # self.resnet.eval()\n\n # self.lstm = nn.LSTM(input_size=768, hidden_size=768 * 2)\n # self.lstm.eval()\n\n # Layers\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs[\"image1\"].to(self.device)\n first_text = inputs[\"text1\"]\n first_length = inputs[\"length1\"].to(self.device)\n first_categories = inputs[\"categories1\"].to(self.device)\n first_days_posted = inputs[\"days_posted1\"].to(self.device)\n\n second_images = inputs[\"image2\"].to(self.device)\n second_text = inputs[\"text2\"]\n second_length = inputs[\"length2\"].to(self.device)\n second_categories = inputs[\"categories2\"].to(self.device)\n second_days_posted = inputs[\"days_posted2\"].to(self.device)\n\n # Resnet\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n # Roberta\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n # features = torch.reshape(features, (-1, 1,768))\n # output, (hn, cn) = self.lstm(features)\n # cn = torch.reshape(cn, (1, 768 * 2))\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n # print(\"DIMENSION OF FEATURES \", features.shape)\n feature_means = torch.mean(features, dim=1)\n # features = torch.reshape(features, (-1, 1,768))\n # output, (hn, cn) = self.lstm(features)\n # cn = torch.reshape(cn, (1, 768 * 2))\n # print(\"DIMENSION OF FEATURES \", features.shape)\n text_features2 = torch.cat([text_features2, feature_means])\n\n # Concatenated tensor\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2, concat_tensor), 1)\n additional_features = torch.cat(\n [\n torch.reshape(first_length, (-1, 1)),\n torch.reshape(second_length, (-1, 1)),\n torch.reshape(first_days_posted, (-1, 1)),\n torch.reshape(second_days_posted, (-1, 1)),\n ],\n dim=1,\n )\n concat_tensor = torch.cat(\n [\n concat_tensor,\n additional_features.float(),\n first_categories.float(),\n second_categories.float(),\n ],\n dim=1,\n )\n\n x = concat_tensor\n zipped_layers = (\n zip(self.bns, self.fcs, [None] * len(self.bns))\n if self.drops is None\n else zip(self.bns, self.fcs, self.drops)\n )\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n\n return x\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs['image1'].to(self.device)\n first_text = inputs['text1']\n first_length = inputs['length1'].to(self.device)\n first_categories = inputs['categories1'].to(self.device)\n first_days_posted = inputs['days_posted1'].to(self.device)\n second_images = inputs['image2'].to(self.device)\n second_text = inputs['text2']\n second_length = inputs['length2'].to(self.device)\n second_categories = inputs['categories2'].to(self.device)\n second_days_posted = inputs['days_posted2'].to(self.device)\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features2 = torch.cat([text_features2, feature_means])\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2,\n concat_tensor), 1)\n additional_features = torch.cat([torch.reshape(first_length, (-1, 1\n )), torch.reshape(second_length, (-1, 1)), torch.reshape(\n first_days_posted, (-1, 1)), torch.reshape(second_days_posted,\n (-1, 1))], dim=1)\n concat_tensor = torch.cat([concat_tensor, additional_features.float\n (), first_categories.float(), second_categories.float()], dim=1)\n x = concat_tensor\n zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)\n ) if self.drops is None else zip(self.bns, self.fcs, self.drops)\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n return x\n", "<import token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs['image1'].to(self.device)\n first_text = inputs['text1']\n first_length = inputs['length1'].to(self.device)\n first_categories = inputs['categories1'].to(self.device)\n first_days_posted = inputs['days_posted1'].to(self.device)\n second_images = inputs['image2'].to(self.device)\n second_text = inputs['text2']\n second_length = inputs['length2'].to(self.device)\n second_categories = inputs['categories2'].to(self.device)\n second_days_posted = inputs['days_posted2'].to(self.device)\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features2 = torch.cat([text_features2, feature_means])\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2,\n concat_tensor), 1)\n additional_features = torch.cat([torch.reshape(first_length, (-1, 1\n )), torch.reshape(second_length, (-1, 1)), torch.reshape(\n first_days_posted, (-1, 1)), torch.reshape(second_days_posted,\n (-1, 1))], dim=1)\n concat_tensor = torch.cat([concat_tensor, additional_features.float\n (), first_categories.float(), second_categories.float()], dim=1)\n x = concat_tensor\n zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)\n ) if self.drops is None else zip(self.bns, self.fcs, self.drops)\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n return x\n", "<import token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n <function token>\n", "<import token>\n\n\nclass Net(torch.nn.Module):\n <function token>\n <function token>\n", "<import token>\n<class token>\n" ]
false
315
3479276d4769518aa60dcd4e1bb41a8a1a7d6517
import os from multiprocessing import Pool import glob import click import logging import pandas as pd from src.resampling.resampling import Resampler # Default paths path_in = 'data/hecktor_nii/' path_out = 'data/resampled/' path_bb = 'data/bbox.csv' @click.command() @click.argument('input_folder', type=click.Path(exists=True), default=path_in) @click.argument('output_folder', type=click.Path(), default=path_out) @click.argument('bounding_boxes_file', type=click.Path(), default=path_bb) @click.option('--cores', type=click.INT, default=12, help='The number of workers for parallelization.') @click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1), help='Expect 3 positive floats describing the output ' 'resolution of the resampling. To avoid resampling ' 'on one or more dimension a value of -1 can be fed ' 'e.g. --resampling 1.0 1.0 -1 will resample the x ' 'and y axis at 1 mm/px and left the z axis untouched.') @click.option('--order', type=click.INT, nargs=1, default=3, help='The order of the spline interpolation used to resample') def main(input_folder, output_folder, bounding_boxes_file, cores, resampling, order): """ This command line interface allows to resample NIFTI files within a given bounding box contain in BOUNDING_BOXES_FILE. The images are resampled with spline interpolation of degree --order (default=3) and the segmentation are resampled by nearest neighbor interpolation. INPUT_FOLDER is the path of the folder containing the NIFTI to resample. OUTPUT_FOLDER is the path of the folder where to store the resampled NIFTI files. BOUNDING_BOXES_FILE is the path of the .csv file containing the bounding boxes of each patient. """ logger = logging.getLogger(__name__) logger.info('Resampling') if not os.path.exists(output_folder): os.mkdir(output_folder) print('resampling is {}'.format(str(resampling))) bb_df = pd.read_csv(bounding_boxes_file) bb_df = bb_df.set_index('PatientID') files_list = [ f for f in glob.glob(input_folder + '/**/*.nii.gz', recursive=True) ] resampler = Resampler(bb_df, output_folder, order, resampling=resampling) with Pool(cores) as p: p.map(resampler, files_list) if __name__ == '__main__': log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(level=logging.INFO, format=log_fmt) logging.captureWarnings(True) main()
[ "import os\nfrom multiprocessing import Pool\nimport glob\n\nimport click\nimport logging\nimport pandas as pd\n\nfrom src.resampling.resampling import Resampler\n\n# Default paths\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores',\n type=click.INT,\n default=12,\n help='The number of workers for parallelization.')\[email protected]('--resampling',\n type=click.FLOAT,\n nargs=3,\n default=(1, 1, 1),\n help='Expect 3 positive floats describing the output '\n 'resolution of the resampling. To avoid resampling '\n 'on one or more dimension a value of -1 can be fed '\n 'e.g. --resampling 1.0 1.0 -1 will resample the x '\n 'and y axis at 1 mm/px and left the z axis untouched.')\[email protected]('--order',\n type=click.INT,\n nargs=1,\n default=3,\n help='The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores, resampling,\n order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [\n f for f in glob.glob(input_folder + '/**/*.nii.gz', recursive=True)\n ]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n\n main()\n", "import os\nfrom multiprocessing import Pool\nimport glob\nimport click\nimport logging\nimport pandas as pd\nfrom src.resampling.resampling import Resampler\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\[email protected]('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\[email protected]('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n", "<import token>\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\[email protected]('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\[email protected]('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n", "<import token>\n<assignment token>\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\[email protected]('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\[email protected]('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n", "<import token>\n<assignment token>\n\n\[email protected]()\[email protected]('input_folder', type=click.Path(exists=True), default=path_in)\[email protected]('output_folder', type=click.Path(), default=path_out)\[email protected]('bounding_boxes_file', type=click.Path(), default=path_bb)\[email protected]('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\[email protected]('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\[email protected]('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<code token>\n" ]
false
316
d5beff74e3746c77cbaf6b8233b822ed1a86701e
# Generated by Django 2.2.2 on 2021-01-23 04:11 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('task', '0022_taskrecycle_create_date'), ] operations = [ migrations.RemoveField( model_name='ansibleextravars', name='playbook', ), migrations.RemoveField( model_name='ansibleplaybook', name='project', ), migrations.DeleteModel( name='CrontabTask', ), migrations.DeleteModel( name='TaskHistory', ), migrations.DeleteModel( name='TaskRecycle', ), migrations.RemoveField( model_name='taskscript', name='project', ), migrations.DeleteModel( name='AnsibleExtravars', ), migrations.DeleteModel( name='AnsiblePlaybook', ), migrations.DeleteModel( name='AnsibleProject', ), migrations.DeleteModel( name='TaskProject', ), migrations.DeleteModel( name='TaskScript', ), ]
[ "# Generated by Django 2.2.2 on 2021-01-23 04:11\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('task', '0022_taskrecycle_create_date'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='ansibleextravars',\n name='playbook',\n ),\n migrations.RemoveField(\n model_name='ansibleplaybook',\n name='project',\n ),\n migrations.DeleteModel(\n name='CrontabTask',\n ),\n migrations.DeleteModel(\n name='TaskHistory',\n ),\n migrations.DeleteModel(\n name='TaskRecycle',\n ),\n migrations.RemoveField(\n model_name='taskscript',\n name='project',\n ),\n migrations.DeleteModel(\n name='AnsibleExtravars',\n ),\n migrations.DeleteModel(\n name='AnsiblePlaybook',\n ),\n migrations.DeleteModel(\n name='AnsibleProject',\n ),\n migrations.DeleteModel(\n name='TaskProject',\n ),\n migrations.DeleteModel(\n name='TaskScript',\n ),\n ]\n", "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('task', '0022_taskrecycle_create_date')]\n operations = [migrations.RemoveField(model_name='ansibleextravars',\n name='playbook'), migrations.RemoveField(model_name=\n 'ansibleplaybook', name='project'), migrations.DeleteModel(name=\n 'CrontabTask'), migrations.DeleteModel(name='TaskHistory'),\n migrations.DeleteModel(name='TaskRecycle'), migrations.RemoveField(\n model_name='taskscript', name='project'), migrations.DeleteModel(\n name='AnsibleExtravars'), migrations.DeleteModel(name=\n 'AnsiblePlaybook'), migrations.DeleteModel(name='AnsibleProject'),\n migrations.DeleteModel(name='TaskProject'), migrations.DeleteModel(\n name='TaskScript')]\n", "<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('task', '0022_taskrecycle_create_date')]\n operations = [migrations.RemoveField(model_name='ansibleextravars',\n name='playbook'), migrations.RemoveField(model_name=\n 'ansibleplaybook', name='project'), migrations.DeleteModel(name=\n 'CrontabTask'), migrations.DeleteModel(name='TaskHistory'),\n migrations.DeleteModel(name='TaskRecycle'), migrations.RemoveField(\n model_name='taskscript', name='project'), migrations.DeleteModel(\n name='AnsibleExtravars'), migrations.DeleteModel(name=\n 'AnsiblePlaybook'), migrations.DeleteModel(name='AnsibleProject'),\n migrations.DeleteModel(name='TaskProject'), migrations.DeleteModel(\n name='TaskScript')]\n", "<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n", "<import token>\n<class token>\n" ]
false
317
77763f501c6776969d2594f987e5d7ab7d4377fb
import time from PyQt5.QtCore import ( QThread, ) from common import attach_common from database_downloader import DatabaseDownload from ai_list_memorize import MemorizeList from ai_list_morpheme import MorphemeList from ai_list_ngram import NgramList from ai_list_none import NoneList from ai_bot_memorize import MemorizeBot from ai_bot_morpheme import MorphemeBot from ai_bot_ngram import NgramBot from ai_bot_none import NoneBot @attach_common class TalkBotThread(QThread): parent = None bot = None lister = None text_target = '' tokens_start_of_text = [] tokens_of_text = [] def run(self): self.start_database() self.update_bot_type() self.start_main_loop() def start_database(self): if self.config.ENABLED_DOWNLOAD: DatabaseDownload.remove_tempfiles() DatabaseDownload.set_url('') DatabaseDownload.do_download_html() else: DatabaseDownload.do_not_download_html() self.text_target = DatabaseDownload().get_outcome() def update_bot_type(self): parent = self.parent parent.is_bot_ready = False parent.show_bot_not_ready_msg() self.type_bot = parent.type_bot self.select_token_list() self.select_bot() parent.bot = self.bot parent.lister = self.lister parent.is_bot_ready = True parent.show_bot_ready_msg() self.output_bot_type() def output_bot_type(self): parent = self.parent msgs = ( 'TalkBot:', ' id: {}'.format(parent.type_bot), ' bot: {}'.format(parent.bot.__class__.__name__), ' lister: {}'.format(parent.lister.__class__.__name__), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]), ) for msg in msgs: self.logger.w(msg) def select_token_list(self): if self.type_bot == 0: self.lister = NoneList( num_of_gram=self.config.DISABLE_NGRAM, text_target=self.text_target, ) self.tokens_start_of_text = self.lister.get_starting_token_list() self.tokens_of_text = self.lister.get_token_list() return if self.type_bot == 1: self.lister = NgramList( num_of_gram=3, text_target=self.text_target, ) self.tokens_start_of_text = self.lister.get_starting_token_list() self.tokens_of_text = self.lister.get_token_list() return if self.type_bot == 2: self.lister = MorphemeList( num_of_gram=self.config.DISABLE_NGRAM, text_target=self.text_target, ) self.tokens_start_of_text = self.lister.get_starting_token_list() self.tokens_of_text = self.lister.get_token_list() return if self.type_bot == 3: self.lister = MemorizeList( num_of_gram=self.config.DISABLE_NGRAM, text_target=self.text_target, ) self.tokens_start_of_text = self.lister.get_starting_token_list() self.tokens_of_text = self.lister.get_token_list() return err = self.type_bot raise Exception(err) def select_bot(self): if self.type_bot == 0: self.bot = NoneBot( starting_token_list=self.tokens_start_of_text, token_list=self.tokens_of_text, ) return if self.type_bot == 1: self.bot = NgramBot( starting_token_list=self.tokens_start_of_text, token_list=self.tokens_of_text, ) return if self.type_bot == 2: self.bot = MorphemeBot( starting_token_list=self.tokens_start_of_text, token_list=self.tokens_of_text, ) return if self.type_bot == 3: self.bot = MemorizeBot( starting_token_list=self.tokens_start_of_text, token_list=self.tokens_of_text, ) return err = self.type_bot raise Exception(err) def start_main_loop(self): parent = self.parent while True: time.sleep(0.2) if parent.is_app_close: break if parent.type_bot != self.type_bot: self.update_bot_type() continue parent.update_bot_msg_to_proper_latest_status() msg = 'Stopped the database thread!' self.logger.w(msg) if __name__ == "__main__": from gui_talkbot import MainWindow TestClass = MainWindow import sys from PyQt5.QtWidgets import QApplication qapp = QApplication(sys.argv) window = TestClass() window.show() code = qapp.exec() sys.exit(code)
[ "import time\nfrom PyQt5.QtCore import (\n QThread,\n)\nfrom common import attach_common\nfrom database_downloader import DatabaseDownload\nfrom ai_list_memorize import MemorizeList\nfrom ai_list_morpheme import MorphemeList\nfrom ai_list_ngram import NgramList\nfrom ai_list_none import NoneList\nfrom ai_bot_memorize import MemorizeBot\nfrom ai_bot_morpheme import MorphemeBot\nfrom ai_bot_ngram import NgramBot\nfrom ai_bot_none import NoneBot\n\n\n@attach_common\nclass TalkBotThread(QThread):\n parent = None\n bot = None\n lister = None\n\n text_target = ''\n tokens_start_of_text = []\n tokens_of_text = []\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = (\n 'TalkBot:',\n ' id: {}'.format(parent.type_bot),\n ' bot: {}'.format(parent.bot.__class__.__name__),\n ' lister: {}'.format(parent.lister.__class__.__name__),\n ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]),\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(\n num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target,\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n\n if self.type_bot == 1:\n self.lister = NgramList(\n num_of_gram=3,\n text_target=self.text_target,\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n\n if self.type_bot == 2:\n self.lister = MorphemeList(\n num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target,\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n\n if self.type_bot == 3:\n self.lister = MemorizeList(\n num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target,\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(\n starting_token_list=self.tokens_start_of_text,\n token_list=self.tokens_of_text,\n )\n return\n\n if self.type_bot == 1:\n self.bot = NgramBot(\n starting_token_list=self.tokens_start_of_text,\n token_list=self.tokens_of_text,\n )\n return\n\n if self.type_bot == 2:\n self.bot = MorphemeBot(\n starting_token_list=self.tokens_start_of_text,\n token_list=self.tokens_of_text,\n )\n return\n\n if self.type_bot == 3:\n self.bot = MemorizeBot(\n starting_token_list=self.tokens_start_of_text,\n token_list=self.tokens_of_text,\n )\n return\n\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n\n if parent.is_app_close:\n break\n\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n\n parent.update_bot_msg_to_proper_latest_status()\n\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\nif __name__ == \"__main__\":\n from gui_talkbot import MainWindow\n TestClass = MainWindow\n\n import sys\n from PyQt5.QtWidgets import QApplication\n qapp = QApplication(sys.argv)\n window = TestClass()\n window.show()\n code = qapp.exec()\n sys.exit(code)\n", "import time\nfrom PyQt5.QtCore import QThread\nfrom common import attach_common\nfrom database_downloader import DatabaseDownload\nfrom ai_list_memorize import MemorizeList\nfrom ai_list_morpheme import MorphemeList\nfrom ai_list_ngram import NgramList\nfrom ai_list_none import NoneList\nfrom ai_bot_memorize import MemorizeBot\nfrom ai_bot_morpheme import MorphemeBot\nfrom ai_bot_ngram import NgramBot\nfrom ai_bot_none import NoneBot\n\n\n@attach_common\nclass TalkBotThread(QThread):\n parent = None\n bot = None\n lister = None\n text_target = ''\n tokens_start_of_text = []\n tokens_of_text = []\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot\n ), ' bot: {}'.format(parent.bot.__class__.__name__\n ), ' lister: {}'.format(parent.lister.__class__.__name__\n ), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 1:\n self.bot = NgramBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 2:\n self.bot = MorphemeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 3:\n self.bot = MemorizeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\nif __name__ == '__main__':\n from gui_talkbot import MainWindow\n TestClass = MainWindow\n import sys\n from PyQt5.QtWidgets import QApplication\n qapp = QApplication(sys.argv)\n window = TestClass()\n window.show()\n code = qapp.exec()\n sys.exit(code)\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n parent = None\n bot = None\n lister = None\n text_target = ''\n tokens_start_of_text = []\n tokens_of_text = []\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot\n ), ' bot: {}'.format(parent.bot.__class__.__name__\n ), ' lister: {}'.format(parent.lister.__class__.__name__\n ), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 1:\n self.bot = NgramBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 2:\n self.bot = MorphemeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 3:\n self.bot = MemorizeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\nif __name__ == '__main__':\n from gui_talkbot import MainWindow\n TestClass = MainWindow\n import sys\n from PyQt5.QtWidgets import QApplication\n qapp = QApplication(sys.argv)\n window = TestClass()\n window.show()\n code = qapp.exec()\n sys.exit(code)\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n parent = None\n bot = None\n lister = None\n text_target = ''\n tokens_start_of_text = []\n tokens_of_text = []\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot\n ), ' bot: {}'.format(parent.bot.__class__.__name__\n ), ' lister: {}'.format(parent.lister.__class__.__name__\n ), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 1:\n self.bot = NgramBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 2:\n self.bot = MorphemeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 3:\n self.bot = MemorizeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<code token>\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot\n ), ' bot: {}'.format(parent.bot.__class__.__name__\n ), ' lister: {}'.format(parent.lister.__class__.__name__\n ), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 1:\n self.bot = NgramBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 2:\n self.bot = MorphemeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 3:\n self.bot = MemorizeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<code token>\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot\n ), ' bot: {}'.format(parent.bot.__class__.__name__\n ), ' lister: {}'.format(parent.lister.__class__.__name__\n ), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n <function token>\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<code token>\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n <function token>\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n <function token>\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<code token>\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n <function token>\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n <function token>\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<code token>\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n <function token>\n <function token>\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n <function token>\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<code token>\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<code token>\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n<class token>\n<code token>\n" ]
false
318
d6a760774b45454c959c2932d7b28deee7f81872
# SPDX-License-Identifier: Apache-2.0 # Licensed to the Ed-Fi Alliance under one or more agreements. # The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0. # See the LICENSE and NOTICES files in the project root for more information. import json from typing import Dict from pandas import DataFrame, concat, Series from edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM ACTIVITY_TYPE_STATE = "Submission State Change" ACTIVITY_TYPE_GRADE = "Submission Grade Change" def submissions_to_user_submission_activities_dfs( submissions_df: DataFrame, ) -> Dict[str, DataFrame]: """ Convert a Submission API DataFrame to a Dict of UserActivity UDM DataFrames grouped by source system section id. Parameters ---------- submissions_df: DataFrame is a Submission API DataFrame Returns ------- Dict[str, DataFrame] LMS UDM UserActivity DataFrames grouped by source system section id Notes ----- UserActivity DataFrame columns are: ActivityDateTime: The date/time the activity occurred ActivityStatus: The activity status ActivityTimeInMinutes: The total activity time in minutes ActivityType: The type of activity, here "Submission" or "Grade" AssignmentIdentifier: A unique numeric identifier assigned to the assignment Content: Content associated with the activity LMSSectionIdentifier: A unique numeric identifier assigned to the section SourceSystem: The system code or name providing the user activity data SourceSystemIdentifier: A unique number or alphanumeric code assigned to a user activity by the source system LMSUserIdentifier: A unique numeric identifier assigned to the user CreateDate: Date this record was created in the extractor LastModifiedDate: Date this record was last updated in the extractor """ assert "submissionHistory" in submissions_df.columns assert "id" in submissions_df.columns assert "courseId" in submissions_df.columns assert "courseWorkId" in submissions_df.columns # convert json-like submissionHistory string to list of dicts submissions_df["submissionHistory"] = submissions_df["submissionHistory"].apply(lambda json_like: json.loads(json_like.replace("'", '"'))) submissions_df["AssignmentIdentifier"] = submissions_df[ ["courseId", "courseWorkId"] ].agg("-".join, axis=1) submissions_df = submissions_df[["id", "courseId", "courseWorkId", "submissionHistory", "AssignmentIdentifier", "CreateDate", "LastModifiedDate"]] # explode submissionHistory lists into rows with other columns duplicated history_df = submissions_df.explode(column="submissionHistory") # type: ignore # expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns history_df = history_df["submissionHistory"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer') history_df.drop(columns=["submissionHistory"], inplace=True) # expand stateHistory (can assume exists, should always have at least one "CREATED" entry) user_submission_df = concat([history_df, history_df["stateHistory"].apply(Series)], axis=1) user_submission_df.dropna(subset=["stateHistory"], inplace=True) # enrich stateHistory user_submission_df["SourceSystemIdentifier"] = "S-" + user_submission_df[ ["courseId", "courseWorkId", "id", "stateTimestamp"] ].agg("-".join, axis=1) user_submission_df = user_submission_df[ [ "SourceSystemIdentifier", "AssignmentIdentifier", "stateTimestamp", "state", "courseId", "actorUserId", "CreateDate", "LastModifiedDate" ] ] user_submission_df = user_submission_df.rename( columns={ "stateTimestamp": "ActivityDateTime", "state": "ActivityStatus", "courseId": "LMSSectionIdentifier", "actorUserId": "LMSUserIdentifier", } ) user_submission_df["ActivityType"] = ACTIVITY_TYPE_STATE # expand gradeHistory if exists if "gradeHistory" in history_df: grade_history_df = concat([history_df, history_df["gradeHistory"].apply(Series)], axis=1) grade_history_df.dropna(subset=["gradeHistory"], inplace=True) # enrich gradeHistory grade_history_df["SourceSystemIdentifier"] = "G-" + grade_history_df[ ["courseId", "courseWorkId", "id", "gradeTimestamp"] ].agg("-".join, axis=1) grade_history_df = grade_history_df[ [ "SourceSystemIdentifier", "AssignmentIdentifier", "gradeTimestamp", "gradeChangeType", "courseId", "actorUserId", "CreateDate", "LastModifiedDate" ] ] grade_history_df = grade_history_df.rename( columns={ "gradeTimestamp": "ActivityDateTime", "gradeChangeType": "ActivityStatus", "courseId": "LMSSectionIdentifier", "actorUserId": "LMSUserIdentifier", } ) grade_history_df["ActivityType"] = ACTIVITY_TYPE_GRADE # combine with stateHistory user_submission_df = user_submission_df.append(grade_history_df) # teacher actions can show up on student histories and vice-versa user_submission_df.drop_duplicates(subset=["SourceSystemIdentifier"], inplace=True) # finish with common columns user_submission_df["ActivityTimeInMinutes"] = "" user_submission_df["Content"] = "" user_submission_df["SourceSystem"] = SOURCE_SYSTEM user_submission_df["SourceCreateDate"] = "" # No create date available from API user_submission_df["SourceLastModifiedDate"] = "" # No modified date available from API # group by section id as a Dict of DataFrames result: Dict[str, DataFrame] = dict( tuple(user_submission_df.groupby(["LMSSectionIdentifier"])) ) return result
[ "# SPDX-License-Identifier: Apache-2.0\n# Licensed to the Ed-Fi Alliance under one or more agreements.\n# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.\n# See the LICENSE and NOTICES files in the project root for more information.\n\nimport json\nfrom typing import Dict\nfrom pandas import DataFrame, concat, Series\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\n\nACTIVITY_TYPE_STATE = \"Submission State Change\"\nACTIVITY_TYPE_GRADE = \"Submission Grade Change\"\n\n\ndef submissions_to_user_submission_activities_dfs(\n submissions_df: DataFrame,\n) -> Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert \"submissionHistory\" in submissions_df.columns\n assert \"id\" in submissions_df.columns\n assert \"courseId\" in submissions_df.columns\n assert \"courseWorkId\" in submissions_df.columns\n\n # convert json-like submissionHistory string to list of dicts\n submissions_df[\"submissionHistory\"] = submissions_df[\"submissionHistory\"].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df[\"AssignmentIdentifier\"] = submissions_df[\n [\"courseId\", \"courseWorkId\"]\n ].agg(\"-\".join, axis=1)\n\n submissions_df = submissions_df[[\"id\", \"courseId\", \"courseWorkId\", \"submissionHistory\", \"AssignmentIdentifier\", \"CreateDate\", \"LastModifiedDate\"]]\n\n # explode submissionHistory lists into rows with other columns duplicated\n history_df = submissions_df.explode(column=\"submissionHistory\") # type: ignore\n\n # expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns\n history_df = history_df[\"submissionHistory\"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer')\n history_df.drop(columns=[\"submissionHistory\"], inplace=True)\n\n # expand stateHistory (can assume exists, should always have at least one \"CREATED\" entry)\n user_submission_df = concat([history_df, history_df[\"stateHistory\"].apply(Series)], axis=1)\n user_submission_df.dropna(subset=[\"stateHistory\"], inplace=True)\n\n # enrich stateHistory\n user_submission_df[\"SourceSystemIdentifier\"] = \"S-\" + user_submission_df[\n [\"courseId\", \"courseWorkId\", \"id\", \"stateTimestamp\"]\n ].agg(\"-\".join, axis=1)\n\n user_submission_df = user_submission_df[\n [\n \"SourceSystemIdentifier\",\n \"AssignmentIdentifier\",\n \"stateTimestamp\",\n \"state\",\n \"courseId\",\n \"actorUserId\",\n \"CreateDate\",\n \"LastModifiedDate\"\n ]\n ]\n\n user_submission_df = user_submission_df.rename(\n columns={\n \"stateTimestamp\": \"ActivityDateTime\",\n \"state\": \"ActivityStatus\",\n \"courseId\": \"LMSSectionIdentifier\",\n \"actorUserId\": \"LMSUserIdentifier\",\n }\n )\n\n user_submission_df[\"ActivityType\"] = ACTIVITY_TYPE_STATE\n\n # expand gradeHistory if exists\n if \"gradeHistory\" in history_df:\n grade_history_df = concat([history_df, history_df[\"gradeHistory\"].apply(Series)], axis=1)\n grade_history_df.dropna(subset=[\"gradeHistory\"], inplace=True)\n\n # enrich gradeHistory\n grade_history_df[\"SourceSystemIdentifier\"] = \"G-\" + grade_history_df[\n [\"courseId\", \"courseWorkId\", \"id\", \"gradeTimestamp\"]\n ].agg(\"-\".join, axis=1)\n\n grade_history_df = grade_history_df[\n [\n \"SourceSystemIdentifier\",\n \"AssignmentIdentifier\",\n \"gradeTimestamp\",\n \"gradeChangeType\",\n \"courseId\",\n \"actorUserId\",\n \"CreateDate\",\n \"LastModifiedDate\"\n ]\n ]\n\n grade_history_df = grade_history_df.rename(\n columns={\n \"gradeTimestamp\": \"ActivityDateTime\",\n \"gradeChangeType\": \"ActivityStatus\",\n \"courseId\": \"LMSSectionIdentifier\",\n \"actorUserId\": \"LMSUserIdentifier\",\n }\n )\n\n grade_history_df[\"ActivityType\"] = ACTIVITY_TYPE_GRADE\n\n # combine with stateHistory\n user_submission_df = user_submission_df.append(grade_history_df)\n\n # teacher actions can show up on student histories and vice-versa\n user_submission_df.drop_duplicates(subset=[\"SourceSystemIdentifier\"], inplace=True)\n\n # finish with common columns\n user_submission_df[\"ActivityTimeInMinutes\"] = \"\"\n user_submission_df[\"Content\"] = \"\"\n user_submission_df[\"SourceSystem\"] = SOURCE_SYSTEM\n user_submission_df[\"SourceCreateDate\"] = \"\" # No create date available from API\n user_submission_df[\"SourceLastModifiedDate\"] = \"\" # No modified date available from API\n\n # group by section id as a Dict of DataFrames\n result: Dict[str, DataFrame] = dict(\n tuple(user_submission_df.groupby([\"LMSSectionIdentifier\"]))\n )\n\n return result\n", "import json\nfrom typing import Dict\nfrom pandas import DataFrame, concat, Series\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\nACTIVITY_TYPE_STATE = 'Submission State Change'\nACTIVITY_TYPE_GRADE = 'Submission Grade Change'\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n", "<import token>\nACTIVITY_TYPE_STATE = 'Submission State Change'\nACTIVITY_TYPE_GRADE = 'Submission Grade Change'\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n", "<import token>\n<assignment token>\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n", "<import token>\n<assignment token>\n<function token>\n" ]
false
319
bd3b1263d7d657fe2edd3c7198f63821a3d1d1e5
import random from . import WaiterInterface class RandomIPv4Waiter(WaiterInterface): """ HostPortWaiter which generates random ipv4 adresses """ def __init__(self, options): self.ports = options['ports'] self.limit_generate = options.get('limit_generate', -1) def generator(self): while self.limit_generate != 0: randomIPv4 = generateRandomIPv4() yield (randomIPv4, self.ports) if self.limit_generate != -1: self.limit_generate -= 1 def generateRandomIPv4(): """ Helper method to generate a random ipv4 adress """ return ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
[ "import random\n\nfrom . import WaiterInterface\n\nclass RandomIPv4Waiter(WaiterInterface):\n \"\"\"\n HostPortWaiter which generates random ipv4 adresses\n \"\"\"\n def __init__(self, options):\n self.ports = options['ports']\n self.limit_generate = options.get('limit_generate', -1)\n\n def generator(self):\n while self.limit_generate != 0:\n randomIPv4 = generateRandomIPv4()\n yield (randomIPv4, self.ports)\n if self.limit_generate != -1:\n self.limit_generate -= 1\n\ndef generateRandomIPv4():\n \"\"\"\n Helper method to generate a random ipv4 adress\n \"\"\"\n return \".\".join(map(str, (random.randint(0, 255) for _ in range(4))))\n", "import random\nfrom . import WaiterInterface\n\n\nclass RandomIPv4Waiter(WaiterInterface):\n \"\"\"\n HostPortWaiter which generates random ipv4 adresses\n \"\"\"\n\n def __init__(self, options):\n self.ports = options['ports']\n self.limit_generate = options.get('limit_generate', -1)\n\n def generator(self):\n while self.limit_generate != 0:\n randomIPv4 = generateRandomIPv4()\n yield randomIPv4, self.ports\n if self.limit_generate != -1:\n self.limit_generate -= 1\n\n\ndef generateRandomIPv4():\n \"\"\"\n Helper method to generate a random ipv4 adress\n \"\"\"\n return '.'.join(map(str, (random.randint(0, 255) for _ in range(4))))\n", "<import token>\n\n\nclass RandomIPv4Waiter(WaiterInterface):\n \"\"\"\n HostPortWaiter which generates random ipv4 adresses\n \"\"\"\n\n def __init__(self, options):\n self.ports = options['ports']\n self.limit_generate = options.get('limit_generate', -1)\n\n def generator(self):\n while self.limit_generate != 0:\n randomIPv4 = generateRandomIPv4()\n yield randomIPv4, self.ports\n if self.limit_generate != -1:\n self.limit_generate -= 1\n\n\ndef generateRandomIPv4():\n \"\"\"\n Helper method to generate a random ipv4 adress\n \"\"\"\n return '.'.join(map(str, (random.randint(0, 255) for _ in range(4))))\n", "<import token>\n\n\nclass RandomIPv4Waiter(WaiterInterface):\n \"\"\"\n HostPortWaiter which generates random ipv4 adresses\n \"\"\"\n\n def __init__(self, options):\n self.ports = options['ports']\n self.limit_generate = options.get('limit_generate', -1)\n\n def generator(self):\n while self.limit_generate != 0:\n randomIPv4 = generateRandomIPv4()\n yield randomIPv4, self.ports\n if self.limit_generate != -1:\n self.limit_generate -= 1\n\n\n<function token>\n", "<import token>\n\n\nclass RandomIPv4Waiter(WaiterInterface):\n <docstring token>\n\n def __init__(self, options):\n self.ports = options['ports']\n self.limit_generate = options.get('limit_generate', -1)\n\n def generator(self):\n while self.limit_generate != 0:\n randomIPv4 = generateRandomIPv4()\n yield randomIPv4, self.ports\n if self.limit_generate != -1:\n self.limit_generate -= 1\n\n\n<function token>\n", "<import token>\n\n\nclass RandomIPv4Waiter(WaiterInterface):\n <docstring token>\n\n def __init__(self, options):\n self.ports = options['ports']\n self.limit_generate = options.get('limit_generate', -1)\n <function token>\n\n\n<function token>\n", "<import token>\n\n\nclass RandomIPv4Waiter(WaiterInterface):\n <docstring token>\n <function token>\n <function token>\n\n\n<function token>\n", "<import token>\n<class token>\n<function token>\n" ]
false
320
5830a6001d7db50002c44aede6fb10938fa01dd1
import nltk class Text(object): def __init__(self, text): self.text = text self.words = nltk.word_tokenize(text) self.sents = nltk.sent_tokenize(text) class Passage(Text): def __init__(self, title, story, questions): Text.__init__(self,story) self.title = title self.questions = questions def display(self): print self.title + '\n' print self.text + '\n\n***\n' for q in self.questions: print '\n' + q.text + ' (' + q.qtype + ')' for a in q.answers: print '\t' + a.text print '\n\tCorrect Answer: ' + q.correct_answer.text class Question(Text): def __init__(self, qtext, qtype, answers, correct_answer): Text.__init__(self,qtext) self.qtype = qtype self.answers = answers self.correct_answer = correct_answer class Answer(Text): def __init__(self, atext): Text.__init__(self,atext)
[ "import nltk\n\nclass Text(object):\n \n def __init__(self, text):\n self.text = text\n self.words = nltk.word_tokenize(text)\n self.sents = nltk.sent_tokenize(text)\n\nclass Passage(Text):\n\n def __init__(self, title, story, questions):\n Text.__init__(self,story)\n self.title = title\n self.questions = questions\n \n def display(self):\n print self.title + '\\n'\n print self.text + '\\n\\n***\\n'\n for q in self.questions:\n print '\\n' + q.text + ' (' + q.qtype + ')'\n for a in q.answers:\n print '\\t' + a.text\n print '\\n\\tCorrect Answer: ' + q.correct_answer.text\n \nclass Question(Text):\n \n def __init__(self, qtext, qtype, answers, correct_answer):\n Text.__init__(self,qtext)\n self.qtype = qtype\n self.answers = answers\n self.correct_answer = correct_answer\n\nclass Answer(Text):\n \n def __init__(self, atext):\n Text.__init__(self,atext)" ]
true
321
d2c31d9c3cc66b43966cfd852582539d4e4bea17
from selenium import webdriver import time import datetime import os import openpyxl as vb from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.common.action_chains import ActionChains def deconnexion(Chrome): """登陆""" """初始化""" global web, actions web = webdriver.Chrome(Chrome) #公司电脑 # web = webdriver.Chrome(r'D:\python\webdrivers\chromedriver.exe') #自己的电脑 web.maximize_window() web.implicitly_wait(10) # 最大运行时间不超过10秒 web.get('http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain') actions = ActionChains(web) """登录网页""" username = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input') # 获得账号和密码 password = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input') username.send_keys('15375429564') password.send_keys("cdc1234cdc") enter = web.find_element_by_xpath("/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button") enter.click() return 0 def menu_lien(): """跳转页面""" enter_into = web.find_element_by_xpath( "/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article") enter_into.click() return 0 def confirm_area(city, area): """确定区域""" """点击区域""" enter_area = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input").click() """点击安徽省""" enter_on_on = web.find_element_by_class_name("el-cascader__dropdown") enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel") try: enter_AnHui_on_on = enter_on.find_elements_by_class_name("el-scrollbar") enter_AnHui_on =enter_AnHui_on_on[0].find_element_by_class_name("el-scrollbar__view") except: time.sleep(1) enter_AnHui_on_on = enter_on.find_elements_by_class_name("el-scrollbar") enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name("el-scrollbar__view") enter_AnHui = enter_AnHui_on.find_element_by_tag_name("li") enter_AnHui_down =enter_AnHui.find_element_by_class_name("el-radio__input") web.execute_script("arguments[0].click();", enter_AnHui_down) """选择城市""" enter_on_on = web.find_element_by_class_name("el-cascader__dropdown") enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel") try: enter_city_on_on =enter_on.find_elements_by_class_name("el-scrollbar") enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap") except: time.sleep(1) enter_city_on_on = enter_on.find_elements_by_class_name("el-scrollbar") enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap") enter_city = enter_city_on.find_elements_by_tag_name("li") for i in range(len(enter_city)): enter_on_on = web.find_element_by_class_name("el-cascader__dropdown") enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel") enter_city_on_on = enter_on.find_elements_by_class_name("el-scrollbar") enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap") enter_city = enter_city_on.find_elements_by_tag_name("li") if enter_city[i].text ==city: enter_city_down = enter_city[i].find_element_by_class_name("el-radio__input") web.execute_script("arguments[0].click();", enter_city_down) break """选则区县""" enter_on_on = web.find_element_by_class_name("el-cascader__dropdown") enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel") try: enter_area_on_on =enter_on.find_elements_by_class_name("el-scrollbar") enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap") except: time.sleep(1) enter_area_on_on = enter_on.find_elements_by_class_name("el-scrollbar") enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap") enter_area = enter_area_on.find_elements_by_tag_name("li") for i in range(len(enter_area)): enter_on_on = web.find_element_by_class_name("el-cascader__dropdown") enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel") enter_area_on_on = enter_on.find_elements_by_class_name("el-scrollbar") enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap") enter_area = enter_area_on.find_elements_by_tag_name("li") if enter_area[i].text ==area: enter_area_down = enter_area[i].find_element_by_class_name("el-radio__input") web.execute_script("arguments[0].click();", enter_area_down) break return 0 def confirm_time_on(excel_time): if type(excel_time) == str: return str(excel_time) elif type(excel_time) == datetime.datetime: excel_time_2 = excel_time.strftime('%Y-%m-%d') return str(excel_time_2) def confirm_tiem(time): """确定时间""" time =confirm_time_on(time) enter_time = web.find_elements_by_class_name("el-range-input") for i in enter_time: i.send_keys(time) return 0 def confirm_cause(cause): """选则症状""" enter_symptom = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input").click() enter_on = web.find_element_by_class_name("is-multiple") enter_on_1 =enter_on.find_element_by_class_name("el-scrollbar") enter_on_symptom = enter_on_1.find_elements_by_tag_name("li") for i in range(len(enter_on_symptom)): enter_on = web.find_element_by_class_name("is-multiple") enter_on_symptom = enter_on.find_elements_by_tag_name("li") if enter_on_symptom[i].text == cause: enter_on_symptom[i].click() break return 0 def search(): """点击搜索""" enter_search = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/button[1]").click() return 0 def reset(): """点击重置""" enter_reset = web.find_element_by_xpath("/html/body/div/section/main/div/div[3]/button[2]").click() return 0 def pending(): """待处理""" enter_pending = web.find_element_by_xpath( "/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]").click() return 0 def accomplish(): """已完成""" enter__accomplish = web.find_element_by_xpath( "/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]").click() return 0 def download_cas(): """下载病例""" enter_download_cas = web.find_element_by_xpath( "/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]").click() enter_download_cas_1 = web.find_element_by_xpath( "/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]").click() return 0 def resetting_excel(cause, clinique, path="D:\林钟\下载"): """重命名病例""" try: files = os.listdir(path) src = path + "\\" + "外呼结果导出表格.xlsx" if cause =="发热伴畏寒|寒战": cause ="发热伴畏寒寒战'" if cause == "畏寒|寒战": cause = "畏寒寒战'" dst = path + "\\" + clinique + "--" + cause + ".xlsx" os.rename(src, dst) except (FileExistsError): files = os.listdir(path) src = path + "\\" + "外呼结果导出表格.xlsx" if cause =="发热伴畏寒|寒战": cause ="发热伴畏寒寒战'" if cause == "畏寒|寒战": cause = "畏寒寒战'" dst = path + "\\" + clinique + "--" + cause + ".xlsx" os.remove(dst) os.rename(src, dst) return 0 def pagination(): #获取当前界面一共有多少条数据 pagination__total = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[5]/span[1]") a = int(pagination__total.text[2:-2]) return a def search_data(cause, city, area, clinique, excel_time): """查找数据""" ls_2 =[] #存储最终点击的元素,如果为空则说明没找到。 trlist_table_on = web.find_element_by_class_name("is-scrolling-none") trlist_table = trlist_table_on.find_element_by_class_name("el-table__body") trlist_tr = trlist_table.find_elements_by_tag_name("tr") for row in range(len(trlist_tr)): trlist_table = web.find_element_by_class_name("el-table__body") trlist_tr = trlist_table.find_elements_by_tag_name("tr") trlist_td = trlist_tr[row].find_elements_by_tag_name("td") i = 0 j = 0 ls = [] for col in range(len(trlist_td)): i += 1 if i == 2: ls.append(trlist_td[col].text) elif i == 3: ls.append(trlist_td[col].text) elif i == 7: ls.append(trlist_td[col]) elif i == 9: j = 1 ls.append((trlist_td[col])) trlist_td = trlist_tr[row].find_elements_by_tag_name("td") if ls[0] == cause: if ls[1] == ("安徽省/" + city + "/" + area + "/" + clinique): if j == 0: # ls[2].click() ls_2.append(ls[2]) elif j == 1: # ls[3].click() ls_2.append(ls[3]) return ls_2 def search_data_down(cause,clinique,path): """找到病例后的对病例进行一系列的处理""" """下载病例""" download_cas() """返回上一界面""" web.back() """点击重置""" reset() """点击待完成""" pending() """给病例重命名""" time.sleep(2) try: resetting_excel(cause, clinique,path) except FileNotFoundError: time.sleep(2) resetting_excel(cause, clinique,path) print(clinique + "--" + cause + "已下载完成!") def tourne_page(): enter_tourne_page =web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[5]/button[2]/i").click() return "" def search_data_on(cause, city, area, clinique, excel_time,path): """核心处理流程""" time.sleep(2) number = pagination() """判断待处理下标是否为0""" if number == 0 : """点击已完成""" accomplish() time.sleep(2) number_accmplish_1 = pagination() """判断已完成的下标是否为0""" if number_accmplish_1 == 0: """如果为0下载失败""" download_revers.append(clinique + "--" + cause + " 下载失败!") else: """不为0判断当前界面是否只有20条数据""" if 0 < number_accmplish_1 <= 20: """只有20条数据查找数据""" accomplish_search_data = search_data(cause, city, area, clinique, excel_time) if len(accomplish_search_data) == 0: """如果没找到结束""" download_revers.append(clinique + "--" + cause + " 下载失败!") reset() else: """如果找到则点击""" accomplish_search_data[0].click() search_data_down(cause,clinique,path) elif 20 < number_accmplish_1 <= 40: """多于20条数据""" accomplish_search_data = search_data(cause, city, area, clinique, excel_time) """判断第一页有没有查到""" if len(accomplish_search_data) == 0: """如果没找到翻页""" tourne_page() accomplish_search_data = search_data(cause, city, area, clinique, excel_time) """判断翻页后有没有找到""" if len(accomplish_search_data) == 0: """如果没找到存入列表""" download_revers.append(clinique + "--" + cause + " 下载失败!") reset() else: """找到后点击""" accomplish_search_data[0].click() search_data_down(cause,clinique,path) else: download_revers.append(clinique + "--" + cause + " 下载失败!") reset() else: """判断待处理里是否小于20条数据""" if 0 < number <= 20: """如果小于进行查找""" pending__search_data = search_data(cause, city, area, clinique, excel_time) """判断有没有找到""" if len(pending__search_data) == 0: """没找到""" """点击已完成""" accomplish() time.sleep(2) number_accmplish_1 = pagination() """判断已完成的下标是否为0""" if number_accmplish_1 == 0: """如果为0下载失败""" download_revers.append(clinique + "--" + cause + " 下载失败!") else: """不为0判断当前界面是否只有20条数据""" if 0 < number_accmplish_1 <= 20: """只有20条数据查找数据""" accomplish_search_data = search_data(cause, city, area, clinique, excel_time) if len(accomplish_search_data) == 0: """如果没找到结束""" download_revers.append(clinique + "--" + cause + " 下载失败!") reset() else: """如果找到则点击""" accomplish_search_data[0].click() search_data_down(cause, clinique, path) elif 20 < number_accmplish_1 <= 40: """多于20条数据""" accomplish_search_data = search_data(cause, city, area, clinique, excel_time) """判断第一页有没有查到""" if len(accomplish_search_data) == 0: """如果没找到翻页""" tourne_page() accomplish_search_data = search_data(cause, city, area, clinique, excel_time) """判断翻页后有没有找到""" if len(accomplish_search_data) == 0: """如果没找到存入列表""" download_revers.append(clinique + "--" + cause + " 下载失败!") reset() else: """找到后点击""" accomplish_search_data[0].click() search_data_down(cause, clinique, path) else: download_revers.append(clinique + "--" + cause + " 下载失败!") reset() else: """找到了""" pending__search_data[0].click() search_data_down(cause,clinique,path) # elif 20< number <= 40: # pending__search_data = search_data(cause, city, area, clinique, excel_time) # """判断有没有找到""" # if len(pending__search_data) == 0: if __name__ == "__main__": download_revers = [] """初始化""" url = input("请输入文件的绝对路径:") #文件路径 path = "D:\林钟\下载" # 下载路径 Chrome = r'D:\PYthon\webdrivers\chromedriver.exe' #驱动路径 time1 = time.time() """登录页面""" deconnexion(Chrome) print("已登陆") menu_lien() print("已跳转") """读取表格""" excel = vb.load_workbook(url) sheet = excel["1-每日监控告警明细"] subscript = 1 for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1): for cell in i: if cell.value in ["3", 3, "高"]: """初始化数值""" cause = sheet["I" + str(cell.row)].value city = sheet["E" + str(cell.row)].value area = sheet["F" + str(cell.row)].value clinique = sheet["G" + str(cell.row)].value excel_time = sheet["D" + str(cell.row)].value """搜索""" try: confirm_area(city, area) confirm_tiem(excel_time) confirm_cause(cause) search() except: try: web.refresh() # 刷新方法 refresh print('刷新成功') confirm_area(city, area) confirm_tiem(excel_time) confirm_cause(cause) search() except Exception as e: print("刷新失败!", format(e)) """查找数据""" search_data_on(cause, city, area, clinique, excel_time, path) """打印最终结果""" print("") print("<-----------下面是下载失败的----------->") for i in download_revers: print(i) print("已全部下载完毕") time2 = time.time() print("用时:{:.2f} 秒".format(time2-time1))
[ "from selenium import webdriver\nimport time\nimport datetime\nimport os\nimport openpyxl as vb\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.action_chains import ActionChains\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome) #公司电脑\n # web = webdriver.Chrome(r'D:\\python\\webdrivers\\chromedriver.exe') #自己的电脑\n web.maximize_window()\n web.implicitly_wait(10) # 最大运行时间不超过10秒\n web.get('http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain')\n actions = ActionChains(web)\n\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input') # 获得账号和密码\n password = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys(\"cdc1234cdc\")\n enter = web.find_element_by_xpath(\"/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button\")\n enter.click()\n return 0\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n \"/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article\")\n enter_into.click()\n return 0\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input\").click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_AnHui_on =enter_AnHui_on_on[0].find_element_by_class_name(\"el-scrollbar__view\")\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\"el-scrollbar__view\")\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name(\"li\")\n enter_AnHui_down =enter_AnHui.find_element_by_class_name(\"el-radio__input\")\n web.execute_script(\"arguments[0].click();\", enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n try:\n enter_city_on_on =enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\"el-cascader-menu__wrap\")\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\"el-cascader-menu__wrap\")\n enter_city = enter_city_on.find_elements_by_tag_name(\"li\")\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n enter_city_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\"el-cascader-menu__wrap\")\n enter_city = enter_city_on.find_elements_by_tag_name(\"li\")\n if enter_city[i].text ==city:\n enter_city_down = enter_city[i].find_element_by_class_name(\"el-radio__input\")\n web.execute_script(\"arguments[0].click();\", enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n try:\n enter_area_on_on =enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\"el-cascader-menu__wrap\")\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\"el-cascader-menu__wrap\")\n enter_area = enter_area_on.find_elements_by_tag_name(\"li\")\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n enter_area_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\"el-cascader-menu__wrap\")\n enter_area = enter_area_on.find_elements_by_tag_name(\"li\")\n if enter_area[i].text ==area:\n enter_area_down = enter_area[i].find_element_by_class_name(\"el-radio__input\")\n web.execute_script(\"arguments[0].click();\", enter_area_down)\n break\n\n return 0\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time =confirm_time_on(time)\n enter_time = web.find_elements_by_class_name(\"el-range-input\")\n for i in enter_time:\n i.send_keys(time)\n return 0\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input\").click()\n enter_on = web.find_element_by_class_name(\"is-multiple\")\n enter_on_1 =enter_on.find_element_by_class_name(\"el-scrollbar\")\n enter_on_symptom = enter_on_1.find_elements_by_tag_name(\"li\")\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name(\"is-multiple\")\n enter_on_symptom = enter_on.find_elements_by_tag_name(\"li\")\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[3]/button[1]\").click()\n return 0\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\"/html/body/div/section/main/div/div[3]/button[2]\").click()\n return 0\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n \"/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]\").click()\n return 0\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n \"/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]\").click()\n return 0\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n \"/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]\").click()\n enter_download_cas_1 = web.find_element_by_xpath(\n \"/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]\").click()\n return 0\ndef resetting_excel(cause, clinique, path=\"D:\\林钟\\下载\"):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + \"\\\\\" + \"外呼结果导出表格.xlsx\"\n if cause ==\"发热伴畏寒|寒战\":\n cause =\"发热伴畏寒寒战'\"\n if cause == \"畏寒|寒战\":\n cause = \"畏寒寒战'\"\n dst = path + \"\\\\\" + clinique + \"--\" + cause + \".xlsx\"\n os.rename(src, dst)\n except (FileExistsError):\n files = os.listdir(path)\n src = path + \"\\\\\" + \"外呼结果导出表格.xlsx\"\n if cause ==\"发热伴畏寒|寒战\":\n cause =\"发热伴畏寒寒战'\"\n if cause == \"畏寒|寒战\":\n cause = \"畏寒寒战'\"\n dst = path + \"\\\\\" + clinique + \"--\" + cause + \".xlsx\"\n os.remove(dst)\n os.rename(src, dst)\n\n return 0\ndef pagination(): #获取当前界面一共有多少条数据\n pagination__total = web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[5]/span[1]\")\n a = int(pagination__total.text[2:-2])\n return a\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 =[] #存储最终点击的元素,如果为空则说明没找到。\n trlist_table_on = web.find_element_by_class_name(\"is-scrolling-none\")\n trlist_table = trlist_table_on.find_element_by_class_name(\"el-table__body\")\n trlist_tr = trlist_table.find_elements_by_tag_name(\"tr\")\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name(\"el-table__body\")\n trlist_tr = trlist_table.find_elements_by_tag_name(\"tr\")\n trlist_td = trlist_tr[row].find_elements_by_tag_name(\"td\")\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append((trlist_td[col]))\n trlist_td = trlist_tr[row].find_elements_by_tag_name(\"td\")\n if ls[0] == cause:\n if ls[1] == (\"安徽省/\" + city + \"/\" + area + \"/\" + clinique):\n if j == 0:\n # ls[2].click()\n ls_2.append(ls[2])\n elif j == 1:\n # ls[3].click()\n ls_2.append(ls[3])\n return ls_2\ndef search_data_down(cause,clinique,path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique,path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique,path)\n print(clinique + \"--\" + cause + \"已下载完成!\")\ndef tourne_page():\n enter_tourne_page =web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[5]/button[2]/i\").click()\n return \"\"\ndef search_data_on(cause, city, area, clinique, excel_time,path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0 :\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause,clinique,path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause,clinique,path)\n else:\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause,clinique,path)\n\n # elif 20< number <= 40:\n # pending__search_data = search_data(cause, city, area, clinique, excel_time)\n # \"\"\"判断有没有找到\"\"\"\n # if len(pending__search_data) == 0:\n\n\nif __name__ == \"__main__\":\n\n download_revers = []\n \"\"\"初始化\"\"\"\n url = input(\"请输入文件的绝对路径:\") #文件路径\n path = \"D:\\林钟\\下载\" # 下载路径\n Chrome = r'D:\\PYthon\\webdrivers\\chromedriver.exe' #驱动路径\n time1 = time.time()\n \"\"\"登录页面\"\"\"\n deconnexion(Chrome)\n print(\"已登陆\")\n menu_lien()\n print(\"已跳转\")\n\n \"\"\"读取表格\"\"\"\n excel = vb.load_workbook(url)\n sheet = excel[\"1-每日监控告警明细\"]\n subscript = 1\n for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):\n for cell in i:\n if cell.value in [\"3\", 3, \"高\"]:\n\n \"\"\"初始化数值\"\"\"\n cause = sheet[\"I\" + str(cell.row)].value\n city = sheet[\"E\" + str(cell.row)].value\n area = sheet[\"F\" + str(cell.row)].value\n clinique = sheet[\"G\" + str(cell.row)].value\n excel_time = sheet[\"D\" + str(cell.row)].value\n\n \"\"\"搜索\"\"\"\n try:\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except:\n try:\n web.refresh() # 刷新方法 refresh\n print('刷新成功')\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except Exception as e:\n print(\"刷新失败!\", format(e))\n\n\n \"\"\"查找数据\"\"\"\n search_data_on(cause, city, area, clinique, excel_time, path)\n\n\n \"\"\"打印最终结果\"\"\"\n print(\"\")\n print(\"<-----------下面是下载失败的----------->\")\n for i in download_revers:\n print(i)\n print(\"已全部下载完毕\")\n time2 = time.time()\n print(\"用时:{:.2f} 秒\".format(time2-time1))", "from selenium import webdriver\nimport time\nimport datetime\nimport os\nimport openpyxl as vb\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\ndef resetting_excel(cause, clinique, path='D:\\\\林钟\\\\下载'):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.rename(src, dst)\n except FileExistsError:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.remove(dst)\n os.rename(src, dst)\n return 0\n\n\ndef pagination():\n pagination__total = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/span[1]')\n a = int(pagination__total.text[2:-2])\n return a\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\nif __name__ == '__main__':\n download_revers = []\n \"\"\"初始化\"\"\"\n url = input('请输入文件的绝对路径:')\n path = 'D:\\\\林钟\\\\下载'\n Chrome = 'D:\\\\PYthon\\\\webdrivers\\\\chromedriver.exe'\n time1 = time.time()\n \"\"\"登录页面\"\"\"\n deconnexion(Chrome)\n print('已登陆')\n menu_lien()\n print('已跳转')\n \"\"\"读取表格\"\"\"\n excel = vb.load_workbook(url)\n sheet = excel['1-每日监控告警明细']\n subscript = 1\n for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):\n for cell in i:\n if cell.value in ['3', 3, '高']:\n \"\"\"初始化数值\"\"\"\n cause = sheet['I' + str(cell.row)].value\n city = sheet['E' + str(cell.row)].value\n area = sheet['F' + str(cell.row)].value\n clinique = sheet['G' + str(cell.row)].value\n excel_time = sheet['D' + str(cell.row)].value\n \"\"\"搜索\"\"\"\n try:\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except:\n try:\n web.refresh()\n print('刷新成功')\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except Exception as e:\n print('刷新失败!', format(e))\n \"\"\"查找数据\"\"\"\n search_data_on(cause, city, area, clinique, excel_time, path)\n \"\"\"打印最终结果\"\"\"\n print('')\n print('<-----------下面是下载失败的----------->')\n for i in download_revers:\n print(i)\n print('已全部下载完毕')\n time2 = time.time()\n print('用时:{:.2f} 秒'.format(time2 - time1))\n", "<import token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\ndef resetting_excel(cause, clinique, path='D:\\\\林钟\\\\下载'):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.rename(src, dst)\n except FileExistsError:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.remove(dst)\n os.rename(src, dst)\n return 0\n\n\ndef pagination():\n pagination__total = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/span[1]')\n a = int(pagination__total.text[2:-2])\n return a\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\nif __name__ == '__main__':\n download_revers = []\n \"\"\"初始化\"\"\"\n url = input('请输入文件的绝对路径:')\n path = 'D:\\\\林钟\\\\下载'\n Chrome = 'D:\\\\PYthon\\\\webdrivers\\\\chromedriver.exe'\n time1 = time.time()\n \"\"\"登录页面\"\"\"\n deconnexion(Chrome)\n print('已登陆')\n menu_lien()\n print('已跳转')\n \"\"\"读取表格\"\"\"\n excel = vb.load_workbook(url)\n sheet = excel['1-每日监控告警明细']\n subscript = 1\n for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):\n for cell in i:\n if cell.value in ['3', 3, '高']:\n \"\"\"初始化数值\"\"\"\n cause = sheet['I' + str(cell.row)].value\n city = sheet['E' + str(cell.row)].value\n area = sheet['F' + str(cell.row)].value\n clinique = sheet['G' + str(cell.row)].value\n excel_time = sheet['D' + str(cell.row)].value\n \"\"\"搜索\"\"\"\n try:\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except:\n try:\n web.refresh()\n print('刷新成功')\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except Exception as e:\n print('刷新失败!', format(e))\n \"\"\"查找数据\"\"\"\n search_data_on(cause, city, area, clinique, excel_time, path)\n \"\"\"打印最终结果\"\"\"\n print('')\n print('<-----------下面是下载失败的----------->')\n for i in download_revers:\n print(i)\n print('已全部下载完毕')\n time2 = time.time()\n print('用时:{:.2f} 秒'.format(time2 - time1))\n", "<import token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\ndef resetting_excel(cause, clinique, path='D:\\\\林钟\\\\下载'):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.rename(src, dst)\n except FileExistsError:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.remove(dst)\n os.rename(src, dst)\n return 0\n\n\ndef pagination():\n pagination__total = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/span[1]')\n a = int(pagination__total.text[2:-2])\n return a\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\ndef resetting_excel(cause, clinique, path='D:\\\\林钟\\\\下载'):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.rename(src, dst)\n except FileExistsError:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.remove(dst)\n os.rename(src, dst)\n return 0\n\n\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\n<function token>\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\n<function token>\n<function token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\n<function token>\n<function token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\n<function token>\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\n<function token>\n<function token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\n<function token>\n<function token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n<function token>\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\n<function token>\n<function token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n<function token>\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\n<function token>\n<function token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n<function token>\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\n<function token>\n<function token>\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n<function token>\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n<function token>\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\n<function token>\n<function token>\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
322
213ab22a269abc8180524462a8966e5d929ef7d1
import os import json import codecs import markdown from flask import current_app def get_json_file(filename, lang='en'): """ Get the contents of a JSON file. """ filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename) with open(filepath, 'r') as f: return json.loads(f.read()) def get_markdown_file(name, lang='en'): """ Get the contents of a markdown file. """ filename_temp = "{0}_{1}.markdown" md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown') filepath = os.path.join(md_dir, filename_temp.format(name, lang)) if not os.path.isfile(filepath) and lang == 'fr': filepath = os.path.join(md_dir, filename_temp.format(name, 'en')) if not os.path.isfile(filepath): return None with codecs.open(filepath, mode='r', encoding="utf-8") as f: return markdown.markdown(f.read())
[ "import os\nimport json\nimport codecs\n\nimport markdown\n\nfrom flask import current_app\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n\n filename_temp = \"{0}_{1}.markdown\"\n\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n\n if not os.path.isfile(filepath):\n return None\n\n with codecs.open(filepath, mode='r', encoding=\"utf-8\") as f:\n return markdown.markdown(f.read())\n", "import os\nimport json\nimport codecs\nimport markdown\nfrom flask import current_app\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n filename_temp = '{0}_{1}.markdown'\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n if not os.path.isfile(filepath):\n return None\n with codecs.open(filepath, mode='r', encoding='utf-8') as f:\n return markdown.markdown(f.read())\n", "<import token>\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n filename_temp = '{0}_{1}.markdown'\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n if not os.path.isfile(filepath):\n return None\n with codecs.open(filepath, mode='r', encoding='utf-8') as f:\n return markdown.markdown(f.read())\n", "<import token>\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\n<function token>\n", "<import token>\n<function token>\n<function token>\n" ]
false
323
398263b65fd98003f27020e46ae38e913dc5dd45
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'Brice Chou' import os import lib import sys import time import getopt import training try: import cv2 import h5py except Exception as e: error_info = 'Please install h5py/cv2 tools first. Error: {}.\n'.format(e) print('\033[0;31m%s\033[0m' % error_info) quit() class Usage(Exception): def __init__(self, msg): self.msg = msg def run(): # Set the window name window_name = __author__ # Get a reference to webcam #-1 (the last one) video_capture = cv2.VideoCapture(-1) # Initialize some variables unknown_folder_path = os.path.abspath('unknown') i = lib.get_file_max_number(unknown_folder_path) filerd = h5py.File('database/training_encodings.hdf5', 'r') # Image encodings mode encodings_mode = 'large' # Temp to save predict result name face_names = [] # Save the screen locations and encodings to find a person screen_locations = [] screen_encodings = [] # Save the training data from database training_names = [] training_eigenvalues = [] process_this_frame = True for key in filerd.keys(): training_names.append(filerd[key].name.split('/')[-1]) training_eigenvalues.append(filerd[key].value) filerd.close() while True: # Grab a single frame of video ret, frame = video_capture.read() # Resize frame of video to 1/4 size # for faster face recognition processing small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5) # Only process every other frame of video to save time if process_this_frame: # Find all the faces and face encodings # in the current frame of video screen_locations = lib.face_locations(small_frame, 1, 'hog') screen_encodings = lib.face_encodings(small_frame, None, 1, encodings_mode) face_names = [] # How manay faces in the screen detected_face_length = len(screen_locations) info = 'We detected \033[0;32m{}\033[0m faces in the screen.\n' print(info.format(detected_face_length)) if detected_face_length >= 1: for screen_encoding in screen_encodings: # Compare the locations and get the face's name name = lib.compare_faces(training_eigenvalues, training_names, screen_encoding, 0.31) face_names.append(name) # Auto save the unknown images if '' == name: img_file_path = '{}/{}.jpg'.format( unknown_folder_path, i) cv2.imwrite(img_file_path, frame) i += 1 time.sleep(0.15) process_this_frame = not process_this_frame # Display the results for (top, right, bottom, left), name in zip(screen_locations, face_names): # We detected in was scaled to 1/2 size top *= 2 right *= 2 bottom *= 2 left *= 2 # Draw a box around the face cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2) if '' != name: # Draw a label with a name below the face # # cv2.cv.CV_FILLED cv2.rectangle(frame, (left - 60, bottom + 30), (right + 60, bottom - 10), (0, 0, 255), cv2.FILLED) font = cv2.FONT_HERSHEY_DUPLEX cv2.putText(frame, name, (left - 50, bottom + 20), font, 1, (255, 255, 255), 1) # Display the resulting image cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN) # cv2.cv.CV_WINDOW_FULLSCREEN cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) cv2.imshow(window_name, frame) key = cv2.waitKey(1) if key == ord('s'): label = 'cache/{}.jpg'.format(i) cv2.imwrite(label, frame) i += 1 elif key == ord('q'): break # Release handle to the webcam video_capture.release() cv2.destroyAllWindows() def main(argv=None): if argv is None: argv = sys.argv try: try: argv_list = argv[1:] opts, args = getopt.getopt(argv_list, 'h', ['help']) arg = argv_list[0] if 'run' == arg: run() elif 'save' == arg: training.save() elif 'move' == arg: training.move() elif 'detect' == arg: training.detect() elif 'catch' == arg: if 2 == len(argv_list): training.catch(argv_list[1]) else: training.catch() elif 'rotate' == arg: if 2 == len(argv_list): training.rotate(amount=int(argv_list[1])) else: training.rotate() except getopt.error, msg: raise Usage(msg) except Usage, err: print >>sys.stderr, err.msg print >>sys.stderr, 'for help use --help' return 2 if __name__ == '__main__': lib.initial_project_folder() sys.exit(main())
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'Brice Chou'\n\nimport os\nimport lib\nimport sys\nimport time\nimport getopt\nimport training\n\ntry:\n import cv2\n import h5py\nexcept Exception as e:\n error_info = 'Please install h5py/cv2 tools first. Error: {}.\\n'.format(e)\n print('\\033[0;31m%s\\033[0m' % error_info)\n quit()\n\n\nclass Usage(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n\ndef run():\n # Set the window name\n window_name = __author__\n\n # Get a reference to webcam #-1 (the last one)\n video_capture = cv2.VideoCapture(-1)\n\n # Initialize some variables\n unknown_folder_path = os.path.abspath('unknown')\n i = lib.get_file_max_number(unknown_folder_path)\n filerd = h5py.File('database/training_encodings.hdf5', 'r')\n\n # Image encodings mode\n encodings_mode = 'large'\n\n # Temp to save predict result name\n face_names = []\n\n # Save the screen locations and encodings to find a person\n screen_locations = []\n screen_encodings = []\n\n # Save the training data from database\n training_names = []\n training_eigenvalues = []\n\n process_this_frame = True\n\n for key in filerd.keys():\n training_names.append(filerd[key].name.split('/')[-1])\n training_eigenvalues.append(filerd[key].value)\n\n filerd.close()\n\n while True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Resize frame of video to 1/4 size\n # for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings\n # in the current frame of video\n screen_locations = lib.face_locations(small_frame, 1,\n 'hog')\n screen_encodings = lib.face_encodings(small_frame, None,\n 1, encodings_mode)\n face_names = []\n\n # How manay faces in the screen\n detected_face_length = len(screen_locations)\n info = 'We detected \\033[0;32m{}\\033[0m faces in the screen.\\n'\n print(info.format(detected_face_length))\n if detected_face_length >= 1:\n for screen_encoding in screen_encodings:\n # Compare the locations and get the face's name\n name = lib.compare_faces(training_eigenvalues,\n training_names,\n screen_encoding, 0.31)\n face_names.append(name)\n\n # Auto save the unknown images\n if '' == name:\n img_file_path = '{}/{}.jpg'.format(\n unknown_folder_path, i)\n cv2.imwrite(img_file_path, frame)\n i += 1\n time.sleep(0.15)\n\n process_this_frame = not process_this_frame\n\n # Display the results\n for (top, right, bottom, left), name in zip(screen_locations, face_names):\n # We detected in was scaled to 1/2 size\n top *= 2\n right *= 2\n bottom *= 2\n left *= 2\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n if '' != name:\n # Draw a label with a name below the face\n # # cv2.cv.CV_FILLED\n cv2.rectangle(frame, (left - 60, bottom + 30),\n (right + 60, bottom - 10), (0, 0, 255),\n cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left - 50, bottom + 20),\n font, 1, (255, 255, 255), 1)\n\n # Display the resulting image\n cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)\n # cv2.cv.CV_WINDOW_FULLSCREEN\n cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,\n cv2.WINDOW_FULLSCREEN)\n cv2.imshow(window_name, frame)\n\n key = cv2.waitKey(1)\n if key == ord('s'):\n label = 'cache/{}.jpg'.format(i)\n cv2.imwrite(label, frame)\n i += 1\n elif key == ord('q'):\n break\n\n # Release handle to the webcam\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n try:\n try:\n argv_list = argv[1:]\n opts, args = getopt.getopt(argv_list, 'h', ['help'])\n arg = argv_list[0]\n if 'run' == arg:\n run()\n elif 'save' == arg:\n training.save()\n elif 'move' == arg:\n training.move()\n elif 'detect' == arg:\n training.detect()\n elif 'catch' == arg:\n if 2 == len(argv_list):\n training.catch(argv_list[1])\n else:\n training.catch()\n elif 'rotate' == arg:\n if 2 == len(argv_list):\n training.rotate(amount=int(argv_list[1]))\n else:\n training.rotate()\n except getopt.error, msg:\n raise Usage(msg)\n except Usage, err:\n print >>sys.stderr, err.msg\n print >>sys.stderr, 'for help use --help'\n return 2\n\n\nif __name__ == '__main__':\n lib.initial_project_folder()\n sys.exit(main())\n" ]
true
324
606e40dd073c3efc95ef01a08466fd536a28f140
from slistener import SListener from slistener import track import datetime import time, tweepy, sys import json import re #def tweet_collector(): consumer_key='qpUR91PwjvChszV0VFgrc4Hje' consumer_secret='q9mPUZE2OsFbaqKUF32ZsY1ry4anZ1k8pNSne56wc3HInmERFu' access_token='2845943577-R0g6YRlrdEqSFb2mKy5HXuByQPdpq4TLGrPkmSs' access_token_secret='ed5emUSxHENLtqN8nLYvGkbipKAEemFd0fgjsXNPC8GED' auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) listen = SListener(api) stream = tweepy.Stream(auth, listen) print "Streaming started..." global track try: stream.filter(track = track) except: stream.disconnect()
[ "from slistener import SListener\nfrom slistener import track\nimport datetime\nimport time, tweepy, sys\nimport json\nimport re\n\n#def tweet_collector():\nconsumer_key='qpUR91PwjvChszV0VFgrc4Hje'\nconsumer_secret='q9mPUZE2OsFbaqKUF32ZsY1ry4anZ1k8pNSne56wc3HInmERFu'\naccess_token='2845943577-R0g6YRlrdEqSFb2mKy5HXuByQPdpq4TLGrPkmSs'\naccess_token_secret='ed5emUSxHENLtqN8nLYvGkbipKAEemFd0fgjsXNPC8GED'\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth) \n\nlisten = SListener(api)\nstream = tweepy.Stream(auth, listen)\nprint \"Streaming started...\"\nglobal track \ntry:\n stream.filter(track = track)\nexcept:\n stream.disconnect()\n" ]
true
325
86ca94820c05b3f63f4a733b6d1fa7eb9dea6a5d
# generated from catkin/cmake/template/order_packages.context.py.in source_root_dir = "/home/songsong/image_transport_ws/src" whitelisted_packages = "".split(';') if "" != "" else [] blacklisted_packages = "".split(';') if "" != "" else [] underlay_workspaces = "/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic".split(';') if "/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic" != "" else []
[ "# generated from catkin/cmake/template/order_packages.context.py.in\nsource_root_dir = \"/home/songsong/image_transport_ws/src\"\nwhitelisted_packages = \"\".split(';') if \"\" != \"\" else []\nblacklisted_packages = \"\".split(';') if \"\" != \"\" else []\nunderlay_workspaces = \"/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic\".split(';') if \"/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic\" != \"\" else []\n", "source_root_dir = '/home/songsong/image_transport_ws/src'\nwhitelisted_packages = ''.split(';') if '' != '' else []\nblacklisted_packages = ''.split(';') if '' != '' else []\nunderlay_workspaces = (\n '/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic'\n .split(';') if \n '/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic'\n != '' else [])\n", "<assignment token>\n" ]
false
326
095d7abfc8297e0bf741a4ebb351a7776055623f
''' The previous code does not correcly compute the stiffening coefficients This program uses the clustering data to re-compute the stiffening coefficients ''' import glob import sys import time #-----------------------------------------------------------------------------------# #-----------------------------------------------------------------------------------# #-----------------------------------------------------------------------------------# def LoadClusterHistogram(inputfile): f = open(inputfile) data = [] while True: fields = f.readline().strip().split(',') if len(fields)>1: nAtomsInCluster = float(fields[0]) nClusters = float(fields[1]) data.append((nAtomsInCluster, nClusters)) else: break return data def NIntsBetweenTerminalGroupsMax(nGroups): return nGroups*(nGroups-1)*0.5 def NIntsBetweenTerminalGroupsMin(nGroups): return nGroups - 1 def NTerminalGroupsInCluster(nAtomsInCluster, moltype): nAtomsPerGroup = {'EtOCSMethyl': 1.0, 'EtOCSVinyl': 2.0, 'EtOCSPhenyl': 6.0} return int(nAtomsInCluster/nAtomsPerGroup[moltype]) def ComputeStiffening(data, moltype): # the min and max number of interactions between pairs of terminal groups nAtomIntsPerPairOfGroupsMin = {'EtOCSMethyl': 1, 'EtOCSVinyl': 1, 'EtOCSPhenyl': 4} nAtomIntsPerPairOfGroupsMax = {'EtOCSMethyl': 1, 'EtOCSVinyl': 4, 'EtOCSPhenyl': 36} nStericInteractionsMin = 0 # gamma_min nStericInteractionsMax = 0 # gamma_max for cluster in data: nAtomsInCluster, nClusters = cluster nTerminalGroups = NTerminalGroupsInCluster(nAtomsInCluster, moltype) nGroupIntsMin = NIntsBetweenTerminalGroupsMin(nTerminalGroups) nGroupIntsMax = NIntsBetweenTerminalGroupsMax(nTerminalGroups) nStericInteractionsMin += nGroupIntsMin * nAtomIntsPerPairOfGroupsMin[moltype] * nClusters nStericInteractionsMax += nGroupIntsMax * nAtomIntsPerPairOfGroupsMax[moltype] * nClusters return (nStericInteractionsMin, nStericInteractionsMax) def ComputeStiffeningOH(data): nStericInteractionsMin = 0 # gamma_min nStericInteractionsMax = 0 # gamma_max for cluster in data: nAtomsInCluster, nClusters = cluster nStericInteractionsMin += (nAtomsInCluster-1)*nClusters nStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters return (nStericInteractionsMin, nStericInteractionsMax) def ComputeStiffeningCoeffs(data): nStericInteractionsMin = 0 # gamma_min nStericInteractionsMax = 0 # gamma_max for cluster in data: nAtomsInCluster, nClusters = cluster nStericInteractionsMin += (nAtomsInCluster-1)*nClusters nStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters return (nStericInteractionsMin, nStericInteractionsMax) #-----------------------------------------------------------------------------------# #-----------------------------------------------------------------------------------# #-----------------------------------------------------------------------------------# if len(sys.argv) < 2: print 'Usage:' print ' python %s <precursor type> [OH - False]' %sys.argv[0] exit() moltype = sys.argv[1] if len(sys.argv) > 2: OHGroups = True else: OHGroups = False t0 = time.time() # get all the relevant files and process each network inputfiles = glob.glob('{}_*.txt'.format(moltype)) # write all the results to the same file f = open('steric_interactions.txt', 'w') f.write('Filename : gamma_min, gamma_max\n') for inputfile in inputfiles: print 'Working with %s...' %inputfile data = LoadClusterHistogram(inputfile) gamma_min, gamma_max = ComputeStiffeningCoeffs(data) # if OHGroups: # gamma_min, gamma_max = ComputeStiffeningOH(data) # else: # gamma_min, gamma_max = ComputeStiffening(data, moltype) f.write('%s : %.4f, %.4f\n' %(inputfile, gamma_min, gamma_max)) f.close() print 'Analyzed network in %.4f seconds.' %(time.time()-t0)
[ "''' The previous code does not correcly compute the stiffening coefficients \nThis program uses the clustering data to re-compute the stiffening coefficients '''\n\nimport glob\nimport sys\nimport time\n\n#-----------------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------------#\n\ndef LoadClusterHistogram(inputfile):\n\tf = open(inputfile)\n\n\tdata = []\n\twhile True:\n\t\tfields = f.readline().strip().split(',')\n\t\tif len(fields)>1: \n\t\t\tnAtomsInCluster = float(fields[0])\n\t\t\tnClusters = float(fields[1])\n\t\t\tdata.append((nAtomsInCluster, nClusters))\n\t\telse:\n\t\t\tbreak\n\treturn data\n\n\ndef NIntsBetweenTerminalGroupsMax(nGroups):\n\treturn nGroups*(nGroups-1)*0.5\n\ndef NIntsBetweenTerminalGroupsMin(nGroups):\n\treturn nGroups - 1\n\ndef NTerminalGroupsInCluster(nAtomsInCluster, moltype):\n\tnAtomsPerGroup = {'EtOCSMethyl': 1.0, 'EtOCSVinyl': 2.0, 'EtOCSPhenyl': 6.0}\n\treturn int(nAtomsInCluster/nAtomsPerGroup[moltype])\n\ndef ComputeStiffening(data, moltype):\n\t# the min and max number of interactions between pairs of terminal groups \n\tnAtomIntsPerPairOfGroupsMin = {'EtOCSMethyl': 1, 'EtOCSVinyl': 1, 'EtOCSPhenyl': 4}\n\tnAtomIntsPerPairOfGroupsMax = {'EtOCSMethyl': 1, 'EtOCSVinyl': 4, 'EtOCSPhenyl': 36} \n\n\tnStericInteractionsMin = 0 # gamma_min\n\tnStericInteractionsMax = 0 # gamma_max\n\tfor cluster in data:\n\t\tnAtomsInCluster, nClusters = cluster\n\n\t\tnTerminalGroups = NTerminalGroupsInCluster(nAtomsInCluster, moltype)\n\t\t\n\t\tnGroupIntsMin = NIntsBetweenTerminalGroupsMin(nTerminalGroups)\n\t\tnGroupIntsMax = NIntsBetweenTerminalGroupsMax(nTerminalGroups)\n\n\t\tnStericInteractionsMin += nGroupIntsMin * nAtomIntsPerPairOfGroupsMin[moltype] * nClusters\n\t\tnStericInteractionsMax += nGroupIntsMax * nAtomIntsPerPairOfGroupsMax[moltype] * nClusters\n\n\treturn (nStericInteractionsMin, nStericInteractionsMax)\n\ndef ComputeStiffeningOH(data):\n\tnStericInteractionsMin = 0 # gamma_min\n\tnStericInteractionsMax = 0 # gamma_max\n\tfor cluster in data:\n\t\tnAtomsInCluster, nClusters = cluster\n\n\t\tnStericInteractionsMin += (nAtomsInCluster-1)*nClusters\n\t\tnStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters\n\n\treturn (nStericInteractionsMin, nStericInteractionsMax)\n\n\ndef ComputeStiffeningCoeffs(data):\n\tnStericInteractionsMin = 0 # gamma_min\n\tnStericInteractionsMax = 0 # gamma_max\n\tfor cluster in data:\n\t\tnAtomsInCluster, nClusters = cluster\n\n\t\tnStericInteractionsMin += (nAtomsInCluster-1)*nClusters\n\t\tnStericInteractionsMax += (nAtomsInCluster*(nAtomsInCluster-1)*0.5)*nClusters\n\n\treturn (nStericInteractionsMin, nStericInteractionsMax)\n\n#-----------------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------------#\n#-----------------------------------------------------------------------------------#\n\nif len(sys.argv) < 2:\n\tprint 'Usage:'\n\tprint ' python %s <precursor type> [OH - False]' %sys.argv[0]\n\texit()\n\nmoltype = sys.argv[1]\n\nif len(sys.argv) > 2: \n\tOHGroups = True\nelse:\n\tOHGroups = False\n\nt0 = time.time()\n\t\t\n# get all the relevant files and process each network\ninputfiles = glob.glob('{}_*.txt'.format(moltype))\n\n# write all the results to the same file\nf = open('steric_interactions.txt', 'w')\nf.write('Filename : gamma_min, gamma_max\\n')\n\nfor inputfile in inputfiles:\n\n\tprint 'Working with %s...' %inputfile\n\t\n\tdata = LoadClusterHistogram(inputfile)\n\tgamma_min, gamma_max = ComputeStiffeningCoeffs(data)\n\n\t# if OHGroups:\n\t# \tgamma_min, gamma_max = ComputeStiffeningOH(data)\n\t# else:\n\t# \tgamma_min, gamma_max = ComputeStiffening(data, moltype)\n\n\tf.write('%s : %.4f, %.4f\\n' %(inputfile, gamma_min, gamma_max))\n\nf.close()\n\nprint 'Analyzed network in %.4f seconds.' %(time.time()-t0) " ]
true
327
1f27b697985c7417e6d8d978703175a415c6c57d
import math r = float(input()) p = int(input()) obim = 2 * r * math.pi ukupanPut = p * obim # centimetre pretvaramo u metre ukupanPut = ukupanPut * 0.01 print("%.2f" % ukupanPut)
[ "import math\n\nr = float(input())\np = int(input())\nobim = 2 * r * math.pi\nukupanPut = p * obim\n# centimetre pretvaramo u metre\nukupanPut = ukupanPut * 0.01\nprint(\"%.2f\" % ukupanPut)\n", "import math\nr = float(input())\np = int(input())\nobim = 2 * r * math.pi\nukupanPut = p * obim\nukupanPut = ukupanPut * 0.01\nprint('%.2f' % ukupanPut)\n", "<import token>\nr = float(input())\np = int(input())\nobim = 2 * r * math.pi\nukupanPut = p * obim\nukupanPut = ukupanPut * 0.01\nprint('%.2f' % ukupanPut)\n", "<import token>\n<assignment token>\nprint('%.2f' % ukupanPut)\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
328
71a9c9b8f47dcfbecc154c44d5a72ddbd852145a
def randomizer(n, garrafa_vidro, lata_metal, copo_plastico, bola_papel, maça_organico): lixos = [garrafa_vidro, lata_metal, copo_plastico, bola_papel, maça_organico] return lixos[n]
[ "\ndef randomizer(n, garrafa_vidro, lata_metal, copo_plastico, bola_papel, maça_organico):\n lixos = [garrafa_vidro, lata_metal, copo_plastico, bola_papel, maça_organico]\n return lixos[n]\n\n", "def randomizer(n, garrafa_vidro, lata_metal, copo_plastico, bola_papel,\n maça_organico):\n lixos = [garrafa_vidro, lata_metal, copo_plastico, bola_papel,\n maça_organico]\n return lixos[n]\n", "<function token>\n" ]
false
329
02e711dfc122007c74949cd9f86e2aeb9d334871
import numpy as np class Adaline: def __init__(self, eta = 0.0001, n_iter = 2000): self.eta = eta self.n_iter = n_iter self.error = [] def fit(self, X, Y): X = np.hstack((np.ones((X.shape[0],1)), X)) self.w = np.random.uniform(-1, 1, (X.shape[1], 1)) for n in range(self.n_iter): y = X.dot(self.w) error = Y - y self.w += self.eta * X.T.dot(error) cost = 1./2 * np.sum(error**2) self.error.append(cost) return self def predict(self, X): X = np.hstack((np.ones((X.shape[0],1)), X)) Y_hat = X.dot(self.w) return Y_hat
[ "import numpy as np\n\n\nclass Adaline:\n def __init__(self, eta = 0.0001, n_iter = 2000):\n self.eta = eta\n self.n_iter = n_iter\n self.error = []\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0],1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1./2 * np.sum(error**2)\n self.error.append(cost)\n return self\n\n def predict(self, X):\n X = np.hstack((np.ones((X.shape[0],1)), X))\n Y_hat = X.dot(self.w)\n return Y_hat\n", "import numpy as np\n\n\nclass Adaline:\n\n def __init__(self, eta=0.0001, n_iter=2000):\n self.eta = eta\n self.n_iter = n_iter\n self.error = []\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1.0 / 2 * np.sum(error ** 2)\n self.error.append(cost)\n return self\n\n def predict(self, X):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n Y_hat = X.dot(self.w)\n return Y_hat\n", "<import token>\n\n\nclass Adaline:\n\n def __init__(self, eta=0.0001, n_iter=2000):\n self.eta = eta\n self.n_iter = n_iter\n self.error = []\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1.0 / 2 * np.sum(error ** 2)\n self.error.append(cost)\n return self\n\n def predict(self, X):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n Y_hat = X.dot(self.w)\n return Y_hat\n", "<import token>\n\n\nclass Adaline:\n <function token>\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1.0 / 2 * np.sum(error ** 2)\n self.error.append(cost)\n return self\n\n def predict(self, X):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n Y_hat = X.dot(self.w)\n return Y_hat\n", "<import token>\n\n\nclass Adaline:\n <function token>\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1.0 / 2 * np.sum(error ** 2)\n self.error.append(cost)\n return self\n <function token>\n", "<import token>\n\n\nclass Adaline:\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n" ]
false
330
6bf1d410a33e3b2535e39e4f8c5c7f8278b3de67
from PIL import Image from src import urbandictionary_api from src.card.cardDrawer import CardDrawer from src.card.cardModel import CardModel from src.repository import Repository from src.urbandictionary_api import get_random_word def save_card(word, image_path, filepath='data/cards/', filename=None): '''Функция для генерации и сохранения изображения Возвращает filepath+filename Параметры: word - слово, чей контент будет на карточке image - задний фон изображения filepath - путь для хранения изображения filename - имя изображения ''' content = urbandictionary_api.get_word_data(word) image = Image.open(image_path) rep = Repository() fonts = rep.fonts model = CardModel( content=content, image=image, auth_font=fonts.aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font=fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.word_font, thumb_font=fonts.thumb_font ) card_drawer = CardDrawer(model) card_drawer.draw_card() path = card_drawer.save(filepath=filepath, filename=filename) return path if __name__ == '__main__': from random import randint save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
[ "from PIL import Image\n\nfrom src import urbandictionary_api\nfrom src.card.cardDrawer import CardDrawer\nfrom src.card.cardModel import CardModel\nfrom src.repository import Repository\nfrom src.urbandictionary_api import get_random_word\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n '''Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n '''\n\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(\n content=content,\n image=image,\n auth_font=fonts.aut_font,\n cat_font=fonts.cat_font,\n def_font=fonts.def_font,\n ex_font=fonts.ex_font,\n rect_font=fonts.rect_font,\n word_font=fonts.word_font,\n thumb_font=fonts.thumb_font\n )\n\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n\n save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n", "from PIL import Image\nfrom src import urbandictionary_api\nfrom src.card.cardDrawer import CardDrawer\nfrom src.card.cardModel import CardModel\nfrom src.repository import Repository\nfrom src.urbandictionary_api import get_random_word\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n save_card(get_random_word(),\n f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n", "<import token>\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n save_card(get_random_word(),\n f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n", "<import token>\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\n<code token>\n", "<import token>\n<function token>\n<code token>\n" ]
false
331
a096e811e50e25e47a9b76b1f813c51f4307bbfe
import django_filters from .models import Drinks, Brand class DrinkFilter(django_filters.FilterSet): BRAND_CHOICES = tuple( (brand.name, brand.name) for brand in Brand.objects.all()) name = django_filters.CharFilter(lookup_expr='icontains') price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt') price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt') likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt') likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt') brands = django_filters.MultipleChoiceFilter(field_name='brand__name', choices=BRAND_CHOICES) class Meta: model = Drinks fields = ['name', 'brands'] """ f = F({'date_after': '2016-01-01', 'date_before': '2016-02-01'}) """
[ "import django_filters\nfrom .models import Drinks, Brand\n\n\nclass DrinkFilter(django_filters.FilterSet):\n BRAND_CHOICES = tuple(\n (brand.name, brand.name) for brand in Brand.objects.all())\n name = django_filters.CharFilter(lookup_expr='icontains')\n price_lt = django_filters.NumberFilter(field_name='price',\n lookup_expr='lt')\n price_gt = django_filters.NumberFilter(field_name='price',\n lookup_expr='gt')\n likes_lt = django_filters.NumberFilter(field_name='likes',\n lookup_expr='lt')\n likes_gt = django_filters.NumberFilter(field_name='likes',\n lookup_expr='gt')\n brands = django_filters.MultipleChoiceFilter(field_name='brand__name',\n choices=BRAND_CHOICES)\n\n class Meta:\n model = Drinks\n fields = ['name', 'brands']\n\n\n\"\"\"\nf = F({'date_after': '2016-01-01', 'date_before': '2016-02-01'})\n\"\"\"", "import django_filters\nfrom .models import Drinks, Brand\n\n\nclass DrinkFilter(django_filters.FilterSet):\n BRAND_CHOICES = tuple((brand.name, brand.name) for brand in Brand.\n objects.all())\n name = django_filters.CharFilter(lookup_expr='icontains')\n price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt'\n )\n price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt'\n )\n likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt'\n )\n likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt'\n )\n brands = django_filters.MultipleChoiceFilter(field_name='brand__name',\n choices=BRAND_CHOICES)\n\n\n class Meta:\n model = Drinks\n fields = ['name', 'brands']\n\n\n<docstring token>\n", "<import token>\n\n\nclass DrinkFilter(django_filters.FilterSet):\n BRAND_CHOICES = tuple((brand.name, brand.name) for brand in Brand.\n objects.all())\n name = django_filters.CharFilter(lookup_expr='icontains')\n price_lt = django_filters.NumberFilter(field_name='price', lookup_expr='lt'\n )\n price_gt = django_filters.NumberFilter(field_name='price', lookup_expr='gt'\n )\n likes_lt = django_filters.NumberFilter(field_name='likes', lookup_expr='lt'\n )\n likes_gt = django_filters.NumberFilter(field_name='likes', lookup_expr='gt'\n )\n brands = django_filters.MultipleChoiceFilter(field_name='brand__name',\n choices=BRAND_CHOICES)\n\n\n class Meta:\n model = Drinks\n fields = ['name', 'brands']\n\n\n<docstring token>\n", "<import token>\n\n\nclass DrinkFilter(django_filters.FilterSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = Drinks\n fields = ['name', 'brands']\n\n\n<docstring token>\n", "<import token>\n<class token>\n<docstring token>\n" ]
false
332
3ea42e7ad5301314a39bf522280c084342cd18c5
from flask import render_template, request, Response from flask.views import MethodView, View from flask.views import View from repo import ClassifierRepo from services import PredictDigitService from settings import CLASSIFIER_STORAGE class IndexView(View): def dispatch_request(self): return render_template('index.html') class PredictDigitView(MethodView): def post(self): repo = ClassifierRepo(CLASSIFIER_STORAGE) service = PredictDigitService(repo) image_data_uri = request.json['image'] prediction = service.handle(image_data_uri) return Response(str(prediction).encode(), status=200)
[ "from flask import render_template, request, Response\nfrom flask.views import MethodView, View\n\nfrom flask.views import View\n\nfrom repo import ClassifierRepo\nfrom services import PredictDigitService\nfrom settings import CLASSIFIER_STORAGE\n\nclass IndexView(View):\n def dispatch_request(self):\n return render_template('index.html')\n\nclass PredictDigitView(MethodView):\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n", "from flask import render_template, request, Response\nfrom flask.views import MethodView, View\nfrom flask.views import View\nfrom repo import ClassifierRepo\nfrom services import PredictDigitService\nfrom settings import CLASSIFIER_STORAGE\n\n\nclass IndexView(View):\n\n def dispatch_request(self):\n return render_template('index.html')\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n", "<import token>\n\n\nclass IndexView(View):\n\n def dispatch_request(self):\n return render_template('index.html')\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n", "<import token>\n\n\nclass IndexView(View):\n <function token>\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n", "<import token>\n<class token>\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n", "<import token>\n<class token>\n\n\nclass PredictDigitView(MethodView):\n <function token>\n", "<import token>\n<class token>\n<class token>\n" ]
false
333
0c97569c77fb3598d83eba607960328bb2134dd2
from __future__ import print_function, division import os from os.path import exists, join, basename, dirname from os import makedirs import numpy as np import datetime import time import argparse import torch import torch.nn as nn import torch.optim as optim from lib.dataloader import DataLoader from lib.im_pair_dataset import ImagePairDataset from lib.normalization import NormalizeImageDict from lib.torch_util import save_checkpoint from lib.torch_util import BatchTensorToVars from lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader # import DCCNet from models.model_dynamic import DCCNet from models.loss_dynamic import weak_loss # Seed and CUDA use_cuda = torch.cuda.is_available() torch.manual_seed(1) if use_cuda: torch.cuda.manual_seed(1) np.random.seed(1) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False print('DCCNet training script') # Argument parsing parser = argparse.ArgumentParser(description='Compute PF Pascal matches') parser.add_argument('--checkpoint', type=str, default='') parser.add_argument('--image_size', type=int, default=400) parser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset') parser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv') parser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs') parser.add_argument('--batch_size', type=int, default=16, help='training batch size') parser.add_argument('--lr', type=float, default=0.0005, help='learning rate') parser.add_argument('--result_model_fn', type=str, default='checkpoint_adam', help='trained model filename') parser.add_argument('--result-model-dir', type=str, default='../model/checkpoints', help='path to trained models folder') parser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune') parser.add_argument('--exp_name', type=str, default='exp_delete', help='experiment name') # DCCNet args parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.') parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in neigh. cons') parser.add_argument('--sce_kernel_size',type=int,default=25,help='kernel size in sce.') parser.add_argument('--sce_hidden_dim',type=int,default=1024,help='hidden dim in sce') parser.add_argument('--scaleloss_weight',type=float,default=1.0,help='whether use scale loss, if use the weight for scale loss') parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in dynamic fusion net.') parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in dynamic fusion net') args = parser.parse_args() print(args) # Create model print('Creating CNN model...') model = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint, ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.att_scale_ncons_channels, ) #Multi-GPU support model = nn.DataParallel(model) # Set which parts of the model to train if args.fe_finetune_params>0: for i in range(args.fe_finetune_params): for p in model.module.FeatureExtraction.model[-1][-(i+1)].parameters(): p.requires_grad=True print('Trainable parameters:') count = 0 for i,param in enumerate(model.named_parameters()): name,p = param if p.requires_grad: count+=1 print(str(count)+": "+name+"\t"+str(p.shape)+"\t") print(model) # Optimizer print('using Adam optimizer') optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) cnn_image_size=(args.image_size,args.image_size) Dataset = ImagePairDataset train_csv = 'train_pairs.csv' #val_pairs_nocoords.csv: for compute loss, with flip column in csv, no coordinates #val_pairs.csv: for compute pck, with coordinates val_nocoordinates_csv = 'val_pairs_nocoords.csv' val_csv = 'image_pairs/val_pairs.csv' normalization_tnf = NormalizeImageDict(['source_image','target_image']) batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda) # Dataset and dataloader dataset = Dataset(transform=normalization_tnf, dataset_image_path=args.dataset_image_path, dataset_csv_path=args.dataset_csv_path, dataset_csv_file = train_csv, output_size=cnn_image_size, ) dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=0) dataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.dataset_image_path, dataset_csv_path=args.dataset_csv_path, dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size) # compute val loss dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size, shuffle=True, num_workers=4) # compute val pck dataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size, eval_dataset_path=args.dataset_image_path, csv_file=val_csv) #load pfpascal val dataset # Define checkpoint name checkpoint_dir = os.path.join(args.result_model_dir,args.exp_name) checkpoint_name = os.path.join(args.result_model_dir,args.exp_name, datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")+'_'+args.result_model_fn + '.pth.tar') log_name = os.path.join(args.result_model_dir,args.exp_name, 'logmain_'+args.exp_name+'.txt') if not exists(dirname(log_name)): makedirs(dirname(log_name)) print('Checkpoint name: '+checkpoint_name) # Train best_val_pck = float("-inf") loss_fn = lambda model,batch: weak_loss(model, batch, normalization='softmax', scaleloss_weight=args.scaleloss_weight) # define epoch function def process_epoch(mode,epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,use_cuda=True,log_interval=50): epoch_loss = 0 for batch_idx, batch in enumerate(dataloader): st = time.time() if mode=='train': optimizer.zero_grad() tnf_batch = batch_preprocessing_fn(batch) loss = loss_fn(model,tnf_batch) loss_np = loss.data.cpu().numpy()[0] #loss_np = loss.data.cpu().numpy() epoch_loss += loss_np if mode=='train': loss.backward() optimizer.step() else: loss=None if batch_idx % log_interval == 0: print(mode.capitalize()+' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'.format( epoch, batch_idx , len(dataloader), 100. * batch_idx / len(dataloader), loss_np,time.time()-st)) epoch_loss /= len(dataloader) print(mode.capitalize()+' set: Average loss: {:.12f}'.format(epoch_loss)) return epoch_loss train_loss = np.zeros(args.num_epochs) val_loss = np.zeros(args.num_epochs) val_pcks = np.zeros(args.num_epochs) model.module.FeatureExtraction.eval() print('Starting training...') for epoch in range(1, args.num_epochs+1): st = time.time() train_loss_curepoch = process_epoch('train',epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,log_interval=1) time_train = time.time()-st st = time.time() val_loss_curepoch = process_epoch('val', epoch, model, loss_fn, optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1) time_valloss = time.time()-st st = time.time() val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck,model=model,verbose=False) time_valpck = time.time()-st train_loss[epoch - 1] = train_loss_curepoch val_loss[epoch - 1] = val_loss_curepoch val_pcks[epoch-1] = val_pck_curepoch # remember best loss is_best = val_pcks[epoch - 1] > best_val_pck best_val_pck = max(val_pcks[epoch - 1], best_val_pck) save_checkpoint({ 'epoch': epoch, 'args': args, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(), 'train_loss': train_loss, 'val_loss': val_loss, 'val_pck': val_pcks, 'best_val_pck':best_val_pck, }, is_best,checkpoint_name,save_all_epochs=False) message = 'Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n'.format\ (epoch, train_loss_curepoch, time_train, val_loss_curepoch, time_valloss,val_pck_curepoch,time_valpck,) print(message) with open(log_name, "a") as log_file: log_file.write('%s\n' % message) print('Done!')
[ "from __future__ import print_function, division\nimport os\nfrom os.path import exists, join, basename, dirname\nfrom os import makedirs\nimport numpy as np\nimport datetime\nimport time\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom lib.dataloader import DataLoader\nfrom lib.im_pair_dataset import ImagePairDataset\nfrom lib.normalization import NormalizeImageDict\nfrom lib.torch_util import save_checkpoint\nfrom lib.torch_util import BatchTensorToVars\nfrom lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader\n\n# import DCCNet\nfrom models.model_dynamic import DCCNet\nfrom models.loss_dynamic import weak_loss\n\n\n# Seed and CUDA\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nprint('DCCNet training script')\n\n# Argument parsing\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help='training batch size')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--result_model_fn', type=str, default='checkpoint_adam', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default='../model/checkpoints', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune')\nparser.add_argument('--exp_name', type=str, default='exp_delete', help='experiment name')\n\n# DCCNet args\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in neigh. cons')\n\nparser.add_argument('--sce_kernel_size',type=int,default=25,help='kernel size in sce.')\nparser.add_argument('--sce_hidden_dim',type=int,default=1024,help='hidden dim in sce')\nparser.add_argument('--scaleloss_weight',type=float,default=1.0,help='whether use scale loss, if use the weight for scale loss')\nparser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in dynamic fusion net.')\nparser.add_argument('--att_scale_ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in dynamic fusion net')\n\nargs = parser.parse_args()\nprint(args)\n\n# Create model\nprint('Creating CNN model...')\nmodel = DCCNet(use_cuda=use_cuda,\n checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes,\n ncons_channels=args.ncons_channels,\n sce_kernel_size=args.sce_kernel_size,\n sce_hidden_dim=args.sce_hidden_dim,\n att_scale_ncons_kernel_sizes=args.att_scale_ncons_kernel_sizes,\n att_scale_ncons_channels=args.att_scale_ncons_channels,\n )\n\n#Multi-GPU support\nmodel = nn.DataParallel(model)\n\n# Set which parts of the model to train\nif args.fe_finetune_params>0:\n for i in range(args.fe_finetune_params):\n for p in model.module.FeatureExtraction.model[-1][-(i+1)].parameters():\n p.requires_grad=True\n\nprint('Trainable parameters:')\ncount = 0\nfor i,param in enumerate(model.named_parameters()):\n name,p = param\n if p.requires_grad:\n count+=1\n print(str(count)+\": \"+name+\"\\t\"+str(p.shape)+\"\\t\")\n\nprint(model)\n\n\n# Optimizer\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)\n \ncnn_image_size=(args.image_size,args.image_size)\n\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\n#val_pairs_nocoords.csv: for compute loss, with flip column in csv, no coordinates\n#val_pairs.csv: for compute pck, with coordinates\nval_nocoordinates_csv = 'val_pairs_nocoords.csv'\nval_csv = 'image_pairs/val_pairs.csv'\n\n\nnormalization_tnf = NormalizeImageDict(['source_image','target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda) \n\n# Dataset and dataloader\ndataset = Dataset(transform=normalization_tnf,\n\t dataset_image_path=args.dataset_image_path,\n\t dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file = train_csv,\n output_size=cnn_image_size,\n )\n\ndataloader = DataLoader(dataset, batch_size=args.batch_size,\n shuffle=True, \n num_workers=0)\n\ndataset_val = Dataset(transform=normalization_tnf,\n dataset_image_path=args.dataset_image_path,\n dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=val_nocoordinates_csv,\n output_size=cnn_image_size)\n\n# compute val loss\ndataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\n\n# compute val pck\ndataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size, eval_dataset_path=args.dataset_image_path, csv_file=val_csv) #load pfpascal val dataset\n\n# Define checkpoint name\ncheckpoint_dir = os.path.join(args.result_model_dir,args.exp_name)\ncheckpoint_name = os.path.join(args.result_model_dir,args.exp_name,\n datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")+'_'+args.result_model_fn + '.pth.tar')\nlog_name = os.path.join(args.result_model_dir,args.exp_name, 'logmain_'+args.exp_name+'.txt')\nif not exists(dirname(log_name)):\n makedirs(dirname(log_name))\nprint('Checkpoint name: '+checkpoint_name)\n \n# Train\nbest_val_pck = float(\"-inf\")\n\nloss_fn = lambda model,batch: weak_loss(model, batch, normalization='softmax', scaleloss_weight=args.scaleloss_weight)\n\n# define epoch function\ndef process_epoch(mode,epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,use_cuda=True,log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n\n st = time.time()\n\n if mode=='train': \n optimizer.zero_grad()\n tnf_batch = batch_preprocessing_fn(batch)\n loss = loss_fn(model,tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n #loss_np = loss.data.cpu().numpy()\n epoch_loss += loss_np\n if mode=='train':\n loss.backward()\n optimizer.step()\n else:\n loss=None\n if batch_idx % log_interval == 0:\n print(mode.capitalize()+' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'.format(\n epoch, batch_idx , len(dataloader),\n 100. * batch_idx / len(dataloader), loss_np,time.time()-st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize()+' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\ntrain_loss = np.zeros(args.num_epochs)\nval_loss = np.zeros(args.num_epochs)\nval_pcks = np.zeros(args.num_epochs)\n\nmodel.module.FeatureExtraction.eval()\n\n\nprint('Starting training...')\nfor epoch in range(1, args.num_epochs+1):\n st = time.time()\n train_loss_curepoch = process_epoch('train',epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,log_interval=1)\n time_train = time.time()-st\n\n st = time.time()\n\n val_loss_curepoch = process_epoch('val', epoch, model, loss_fn, optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)\n\n time_valloss = time.time()-st\n\n st = time.time()\n val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck,model=model,verbose=False)\n time_valpck = time.time()-st\n\n train_loss[epoch - 1] = train_loss_curepoch\n val_loss[epoch - 1] = val_loss_curepoch\n val_pcks[epoch-1] = val_pck_curepoch\n\n # remember best loss\n is_best = val_pcks[epoch - 1] > best_val_pck\n best_val_pck = max(val_pcks[epoch - 1], best_val_pck)\n save_checkpoint({\n 'epoch': epoch,\n 'args': args,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'train_loss': train_loss,\n 'val_loss': val_loss,\n 'val_pck': val_pcks,\n 'best_val_pck':best_val_pck,\n }, is_best,checkpoint_name,save_all_epochs=False)\n\n message = 'Epoch{}\\tTrain_loss{:.6f}\\tcost time{:.1f}\\tVal_loss{:.6f}\\tcost time{:.1f}\\tVal_pck{:.6f}\\tcost time{:.1f}\\n'.format\\\n (epoch, train_loss_curepoch, time_train, val_loss_curepoch, time_valloss,val_pck_curepoch,time_valpck,)\n print(message)\n with open(log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message)\n\n\nprint('Done!')\n", "from __future__ import print_function, division\nimport os\nfrom os.path import exists, join, basename, dirname\nfrom os import makedirs\nimport numpy as np\nimport datetime\nimport time\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom lib.dataloader import DataLoader\nfrom lib.im_pair_dataset import ImagePairDataset\nfrom lib.normalization import NormalizeImageDict\nfrom lib.torch_util import save_checkpoint\nfrom lib.torch_util import BatchTensorToVars\nfrom lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader\nfrom models.model_dynamic import DCCNet\nfrom models.loss_dynamic import weak_loss\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nprint('DCCNet training script')\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default=\n 'datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default=\n 'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help=\n 'number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help=\n 'training batch size')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--result_model_fn', type=str, default=\n 'checkpoint_adam', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default=\n '../model/checkpoints', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help=\n 'number of layers to finetune')\nparser.add_argument('--exp_name', type=str, default='exp_delete', help=\n 'experiment name')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,\n 5, 5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16, \n 16, 1], help='channels in neigh. cons')\nparser.add_argument('--sce_kernel_size', type=int, default=25, help=\n 'kernel size in sce.')\nparser.add_argument('--sce_hidden_dim', type=int, default=1024, help=\n 'hidden dim in sce')\nparser.add_argument('--scaleloss_weight', type=float, default=1.0, help=\n 'whether use scale loss, if use the weight for scale loss')\nparser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,\n default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')\nparser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,\n default=[16, 16, 1], help='channels in dynamic fusion net')\nargs = parser.parse_args()\nprint(args)\nprint('Creating CNN model...')\nmodel = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.\n ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=\n args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.\n att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.\n att_scale_ncons_channels)\nmodel = nn.DataParallel(model)\nif args.fe_finetune_params > 0:\n for i in range(args.fe_finetune_params):\n for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(\n ):\n p.requires_grad = True\nprint('Trainable parameters:')\ncount = 0\nfor i, param in enumerate(model.named_parameters()):\n name, p = param\n if p.requires_grad:\n count += 1\n print(str(count) + ': ' + name + '\\t' + str(p.shape) + '\\t')\nprint(model)\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()\n ), lr=args.lr)\ncnn_image_size = args.image_size, args.image_size\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\nval_nocoordinates_csv = 'val_pairs_nocoords.csv'\nval_csv = 'image_pairs/val_pairs.csv'\nnormalization_tnf = NormalizeImageDict(['source_image', 'target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)\ndataset = Dataset(transform=normalization_tnf, dataset_image_path=args.\n dataset_image_path, dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=train_csv, output_size=cnn_image_size)\ndataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=0)\ndataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.\n dataset_image_path, dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size)\ndataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\ndataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size,\n eval_dataset_path=args.dataset_image_path, csv_file=val_csv)\ncheckpoint_dir = os.path.join(args.result_model_dir, args.exp_name)\ncheckpoint_name = os.path.join(args.result_model_dir, args.exp_name, \n datetime.datetime.now().strftime('%Y-%m-%d_%H:%M') + '_' + args.\n result_model_fn + '.pth.tar')\nlog_name = os.path.join(args.result_model_dir, args.exp_name, 'logmain_' +\n args.exp_name + '.txt')\nif not exists(dirname(log_name)):\n makedirs(dirname(log_name))\nprint('Checkpoint name: ' + checkpoint_name)\nbest_val_pck = float('-inf')\nloss_fn = lambda model, batch: weak_loss(model, batch, normalization=\n 'softmax', scaleloss_weight=args.scaleloss_weight)\n\n\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,\n batch_preprocessing_fn, use_cuda=True, log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n st = time.time()\n if mode == 'train':\n optimizer.zero_grad()\n tnf_batch = batch_preprocessing_fn(batch)\n loss = loss_fn(model, tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() +\n ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'\n .format(epoch, batch_idx, len(dataloader), 100.0 *\n batch_idx / len(dataloader), loss_np, time.time() - st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\n\ntrain_loss = np.zeros(args.num_epochs)\nval_loss = np.zeros(args.num_epochs)\nval_pcks = np.zeros(args.num_epochs)\nmodel.module.FeatureExtraction.eval()\nprint('Starting training...')\nfor epoch in range(1, args.num_epochs + 1):\n st = time.time()\n train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,\n optimizer, dataloader, batch_preprocessing_fn, log_interval=1)\n time_train = time.time() - st\n st = time.time()\n val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,\n optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)\n time_valloss = time.time() - st\n st = time.time()\n val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=\n model, verbose=False)\n time_valpck = time.time() - st\n train_loss[epoch - 1] = train_loss_curepoch\n val_loss[epoch - 1] = val_loss_curepoch\n val_pcks[epoch - 1] = val_pck_curepoch\n is_best = val_pcks[epoch - 1] > best_val_pck\n best_val_pck = max(val_pcks[epoch - 1], best_val_pck)\n save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.\n state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':\n train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,\n 'best_val_pck': best_val_pck}, is_best, checkpoint_name,\n save_all_epochs=False)\n message = (\n \"\"\"Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n\"\"\"\n .format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,\n time_valloss, val_pck_curepoch, time_valpck))\n print(message)\n with open(log_name, 'a') as log_file:\n log_file.write('%s\\n' % message)\nprint('Done!')\n", "<import token>\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nprint('DCCNet training script')\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default=\n 'datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default=\n 'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help=\n 'number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help=\n 'training batch size')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--result_model_fn', type=str, default=\n 'checkpoint_adam', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default=\n '../model/checkpoints', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help=\n 'number of layers to finetune')\nparser.add_argument('--exp_name', type=str, default='exp_delete', help=\n 'experiment name')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,\n 5, 5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16, \n 16, 1], help='channels in neigh. cons')\nparser.add_argument('--sce_kernel_size', type=int, default=25, help=\n 'kernel size in sce.')\nparser.add_argument('--sce_hidden_dim', type=int, default=1024, help=\n 'hidden dim in sce')\nparser.add_argument('--scaleloss_weight', type=float, default=1.0, help=\n 'whether use scale loss, if use the weight for scale loss')\nparser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,\n default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')\nparser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,\n default=[16, 16, 1], help='channels in dynamic fusion net')\nargs = parser.parse_args()\nprint(args)\nprint('Creating CNN model...')\nmodel = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.\n ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=\n args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.\n att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.\n att_scale_ncons_channels)\nmodel = nn.DataParallel(model)\nif args.fe_finetune_params > 0:\n for i in range(args.fe_finetune_params):\n for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(\n ):\n p.requires_grad = True\nprint('Trainable parameters:')\ncount = 0\nfor i, param in enumerate(model.named_parameters()):\n name, p = param\n if p.requires_grad:\n count += 1\n print(str(count) + ': ' + name + '\\t' + str(p.shape) + '\\t')\nprint(model)\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()\n ), lr=args.lr)\ncnn_image_size = args.image_size, args.image_size\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\nval_nocoordinates_csv = 'val_pairs_nocoords.csv'\nval_csv = 'image_pairs/val_pairs.csv'\nnormalization_tnf = NormalizeImageDict(['source_image', 'target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)\ndataset = Dataset(transform=normalization_tnf, dataset_image_path=args.\n dataset_image_path, dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=train_csv, output_size=cnn_image_size)\ndataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=0)\ndataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.\n dataset_image_path, dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size)\ndataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\ndataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size,\n eval_dataset_path=args.dataset_image_path, csv_file=val_csv)\ncheckpoint_dir = os.path.join(args.result_model_dir, args.exp_name)\ncheckpoint_name = os.path.join(args.result_model_dir, args.exp_name, \n datetime.datetime.now().strftime('%Y-%m-%d_%H:%M') + '_' + args.\n result_model_fn + '.pth.tar')\nlog_name = os.path.join(args.result_model_dir, args.exp_name, 'logmain_' +\n args.exp_name + '.txt')\nif not exists(dirname(log_name)):\n makedirs(dirname(log_name))\nprint('Checkpoint name: ' + checkpoint_name)\nbest_val_pck = float('-inf')\nloss_fn = lambda model, batch: weak_loss(model, batch, normalization=\n 'softmax', scaleloss_weight=args.scaleloss_weight)\n\n\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,\n batch_preprocessing_fn, use_cuda=True, log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n st = time.time()\n if mode == 'train':\n optimizer.zero_grad()\n tnf_batch = batch_preprocessing_fn(batch)\n loss = loss_fn(model, tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() +\n ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'\n .format(epoch, batch_idx, len(dataloader), 100.0 *\n batch_idx / len(dataloader), loss_np, time.time() - st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\n\ntrain_loss = np.zeros(args.num_epochs)\nval_loss = np.zeros(args.num_epochs)\nval_pcks = np.zeros(args.num_epochs)\nmodel.module.FeatureExtraction.eval()\nprint('Starting training...')\nfor epoch in range(1, args.num_epochs + 1):\n st = time.time()\n train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,\n optimizer, dataloader, batch_preprocessing_fn, log_interval=1)\n time_train = time.time() - st\n st = time.time()\n val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,\n optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)\n time_valloss = time.time() - st\n st = time.time()\n val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=\n model, verbose=False)\n time_valpck = time.time() - st\n train_loss[epoch - 1] = train_loss_curepoch\n val_loss[epoch - 1] = val_loss_curepoch\n val_pcks[epoch - 1] = val_pck_curepoch\n is_best = val_pcks[epoch - 1] > best_val_pck\n best_val_pck = max(val_pcks[epoch - 1], best_val_pck)\n save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.\n state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':\n train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,\n 'best_val_pck': best_val_pck}, is_best, checkpoint_name,\n save_all_epochs=False)\n message = (\n \"\"\"Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n\"\"\"\n .format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,\n time_valloss, val_pck_curepoch, time_valpck))\n print(message)\n with open(log_name, 'a') as log_file:\n log_file.write('%s\\n' % message)\nprint('Done!')\n", "<import token>\n<assignment token>\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n<assignment token>\nprint('DCCNet training script')\n<assignment token>\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default=\n 'datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default=\n 'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help=\n 'number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help=\n 'training batch size')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--result_model_fn', type=str, default=\n 'checkpoint_adam', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default=\n '../model/checkpoints', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help=\n 'number of layers to finetune')\nparser.add_argument('--exp_name', type=str, default='exp_delete', help=\n 'experiment name')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,\n 5, 5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16, \n 16, 1], help='channels in neigh. cons')\nparser.add_argument('--sce_kernel_size', type=int, default=25, help=\n 'kernel size in sce.')\nparser.add_argument('--sce_hidden_dim', type=int, default=1024, help=\n 'hidden dim in sce')\nparser.add_argument('--scaleloss_weight', type=float, default=1.0, help=\n 'whether use scale loss, if use the weight for scale loss')\nparser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,\n default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')\nparser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,\n default=[16, 16, 1], help='channels in dynamic fusion net')\n<assignment token>\nprint(args)\nprint('Creating CNN model...')\n<assignment token>\nif args.fe_finetune_params > 0:\n for i in range(args.fe_finetune_params):\n for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(\n ):\n p.requires_grad = True\nprint('Trainable parameters:')\n<assignment token>\nfor i, param in enumerate(model.named_parameters()):\n name, p = param\n if p.requires_grad:\n count += 1\n print(str(count) + ': ' + name + '\\t' + str(p.shape) + '\\t')\nprint(model)\nprint('using Adam optimizer')\n<assignment token>\nif not exists(dirname(log_name)):\n makedirs(dirname(log_name))\nprint('Checkpoint name: ' + checkpoint_name)\n<assignment token>\n\n\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,\n batch_preprocessing_fn, use_cuda=True, log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n st = time.time()\n if mode == 'train':\n optimizer.zero_grad()\n tnf_batch = batch_preprocessing_fn(batch)\n loss = loss_fn(model, tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() +\n ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'\n .format(epoch, batch_idx, len(dataloader), 100.0 *\n batch_idx / len(dataloader), loss_np, time.time() - st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\n\n<assignment token>\nmodel.module.FeatureExtraction.eval()\nprint('Starting training...')\nfor epoch in range(1, args.num_epochs + 1):\n st = time.time()\n train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,\n optimizer, dataloader, batch_preprocessing_fn, log_interval=1)\n time_train = time.time() - st\n st = time.time()\n val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,\n optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)\n time_valloss = time.time() - st\n st = time.time()\n val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=\n model, verbose=False)\n time_valpck = time.time() - st\n train_loss[epoch - 1] = train_loss_curepoch\n val_loss[epoch - 1] = val_loss_curepoch\n val_pcks[epoch - 1] = val_pck_curepoch\n is_best = val_pcks[epoch - 1] > best_val_pck\n best_val_pck = max(val_pcks[epoch - 1], best_val_pck)\n save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.\n state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':\n train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,\n 'best_val_pck': best_val_pck}, is_best, checkpoint_name,\n save_all_epochs=False)\n message = (\n \"\"\"Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n\"\"\"\n .format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,\n time_valloss, val_pck_curepoch, time_valpck))\n print(message)\n with open(log_name, 'a') as log_file:\n log_file.write('%s\\n' % message)\nprint('Done!')\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,\n batch_preprocessing_fn, use_cuda=True, log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n st = time.time()\n if mode == 'train':\n optimizer.zero_grad()\n tnf_batch = batch_preprocessing_fn(batch)\n loss = loss_fn(model, tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() +\n ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'\n .format(epoch, batch_idx, len(dataloader), 100.0 *\n batch_idx / len(dataloader), loss_np, time.time() - st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\n\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n" ]
false
334
a65dfca1773c1e4101ebfb953e0f617a2c345695
def merge(self, intervals): intervals.sort() arr = [] for i in intervals: if len(arr)==0 or arr[-1][1] < i[0]: arr.append(i) else: arr[-1][1] = max(arr[-1][1], i[1]) return arr
[ "def merge(self, intervals):\n intervals.sort()\n \n arr = []\n \n for i in intervals:\n if len(arr)==0 or arr[-1][1] < i[0]:\n arr.append(i)\n else:\n arr[-1][1] = max(arr[-1][1], i[1])\n \n return arr\n", "def merge(self, intervals):\n intervals.sort()\n arr = []\n for i in intervals:\n if len(arr) == 0 or arr[-1][1] < i[0]:\n arr.append(i)\n else:\n arr[-1][1] = max(arr[-1][1], i[1])\n return arr\n", "<function token>\n" ]
false
335
49005500b299ca276f663fe8431bb955e5585bbd
import Net import mnist_parser import numpy as np #To use this model it is required to download the MNIST database #The donwloaded base is then needet parse to numpy using mnist_parser.parse_to_npy method #The files genetared using mnist_parser.parse_to_npy are then loaded using np.load in_values = np.load("MNIST/mnist_train_images.npy") out_values = np.load("MNIST/mnist_train_labels.npy") out_gt_numbers=mnist_parser.one_hots_to_ints(out_values) in_testing_values = np.load("MNIST/mnist_test_images.npy") out_testing_values = np.load("MNIST/mnist_test_labels.npy") out_gt_numbers_test=mnist_parser.one_hots_to_ints(out_testing_values) while(True): net = Net.FeedForwardNet(input_count=784, layers=[100, 10], activation_function=Net.FeedForwardNet.leaky_relu) try: epoch_num=int(input("Epoch_num:")) batch_size=int(input("Batch_size:")) #30 learning_rate=float(input("Learning rate:")) #0.001 inertion_factor=float(input("Inertion factor:")) #0.5 # max_error=float(input("Maximum error")) except: print("Parse error") continue for i in range(epoch_num): batch_in,batch_out=net.generate_random_batch(in_values,out_values,batch_size) net.forward_propagation(batch_in) net.backpropagation(batch_out, learning_rate=learning_rate, inertion_factor=inertion_factor) # print("X:",net.X[-1]) # net.stochastic_backpropagation(batch_out, learning_rate=learning_rate) if i % 50 == 0: print() output=net.forward_propagation(in_testing_values) if net.check_total_squared_error(output_values=out_testing_values, epsilon=1000, verbose=True): break output_numbers=mnist_parser.one_hots_to_ints(output) correct=np.sum( out_gt_numbers_test == output_numbers) print("Epoch: ", i, " br tocnih:",correct,"/",output_numbers.size,"(",correct/output_numbers.size,"%)") output=net.forward_propagation(in_testing_values) conf_mat=net.calculate_confusion_matrix(out_testing_values) output_numbers = mnist_parser.one_hots_to_ints(output) correct=np.sum(out_gt_numbers_test == output_numbers) print("Correct:",correct,"/",output_numbers.size,"(",correct/output_numbers.size ,"%)") print(conf_mat) save=int(input("Save?(1/0)")) if(save == 1): name=input("Save as?") net.save_state(name) exit=int(input("Exit?(1/0)")) if(exit == 1): break
[ "import Net\nimport mnist_parser\nimport numpy as np\n#To use this model it is required to download the MNIST database\n#The donwloaded base is then needet parse to numpy using mnist_parser.parse_to_npy method\n#The files genetared using mnist_parser.parse_to_npy are then loaded using np.load\nin_values = np.load(\"MNIST/mnist_train_images.npy\")\nout_values = np.load(\"MNIST/mnist_train_labels.npy\")\nout_gt_numbers=mnist_parser.one_hots_to_ints(out_values)\n\nin_testing_values = np.load(\"MNIST/mnist_test_images.npy\")\nout_testing_values = np.load(\"MNIST/mnist_test_labels.npy\")\nout_gt_numbers_test=mnist_parser.one_hots_to_ints(out_testing_values)\n\nwhile(True):\n\n net = Net.FeedForwardNet(input_count=784, layers=[100, 10], activation_function=Net.FeedForwardNet.leaky_relu)\n\n try:\n epoch_num=int(input(\"Epoch_num:\"))\n batch_size=int(input(\"Batch_size:\")) #30\n learning_rate=float(input(\"Learning rate:\")) #0.001\n inertion_factor=float(input(\"Inertion factor:\")) #0.5\n # max_error=float(input(\"Maximum error\"))\n except:\n print(\"Parse error\")\n continue\n\n for i in range(epoch_num):\n batch_in,batch_out=net.generate_random_batch(in_values,out_values,batch_size)\n net.forward_propagation(batch_in)\n net.backpropagation(batch_out, learning_rate=learning_rate, inertion_factor=inertion_factor)\n # print(\"X:\",net.X[-1])\n # net.stochastic_backpropagation(batch_out, learning_rate=learning_rate)\n\n if i % 50 == 0:\n print()\n output=net.forward_propagation(in_testing_values)\n if net.check_total_squared_error(output_values=out_testing_values, epsilon=1000, verbose=True):\n break\n output_numbers=mnist_parser.one_hots_to_ints(output)\n correct=np.sum( out_gt_numbers_test == output_numbers)\n print(\"Epoch: \", i, \" br tocnih:\",correct,\"/\",output_numbers.size,\"(\",correct/output_numbers.size,\"%)\")\n\n\n output=net.forward_propagation(in_testing_values)\n conf_mat=net.calculate_confusion_matrix(out_testing_values)\n\n output_numbers = mnist_parser.one_hots_to_ints(output)\n correct=np.sum(out_gt_numbers_test == output_numbers)\n print(\"Correct:\",correct,\"/\",output_numbers.size,\"(\",correct/output_numbers.size ,\"%)\")\n print(conf_mat)\n\n\n save=int(input(\"Save?(1/0)\"))\n if(save == 1):\n name=input(\"Save as?\")\n net.save_state(name)\n exit=int(input(\"Exit?(1/0)\"))\n if(exit == 1):\n break\n\n", "import Net\nimport mnist_parser\nimport numpy as np\nin_values = np.load('MNIST/mnist_train_images.npy')\nout_values = np.load('MNIST/mnist_train_labels.npy')\nout_gt_numbers = mnist_parser.one_hots_to_ints(out_values)\nin_testing_values = np.load('MNIST/mnist_test_images.npy')\nout_testing_values = np.load('MNIST/mnist_test_labels.npy')\nout_gt_numbers_test = mnist_parser.one_hots_to_ints(out_testing_values)\nwhile True:\n net = Net.FeedForwardNet(input_count=784, layers=[100, 10],\n activation_function=Net.FeedForwardNet.leaky_relu)\n try:\n epoch_num = int(input('Epoch_num:'))\n batch_size = int(input('Batch_size:'))\n learning_rate = float(input('Learning rate:'))\n inertion_factor = float(input('Inertion factor:'))\n except:\n print('Parse error')\n continue\n for i in range(epoch_num):\n batch_in, batch_out = net.generate_random_batch(in_values,\n out_values, batch_size)\n net.forward_propagation(batch_in)\n net.backpropagation(batch_out, learning_rate=learning_rate,\n inertion_factor=inertion_factor)\n if i % 50 == 0:\n print()\n output = net.forward_propagation(in_testing_values)\n if net.check_total_squared_error(output_values=\n out_testing_values, epsilon=1000, verbose=True):\n break\n output_numbers = mnist_parser.one_hots_to_ints(output)\n correct = np.sum(out_gt_numbers_test == output_numbers)\n print('Epoch: ', i, ' br tocnih:', correct, '/', output_numbers\n .size, '(', correct / output_numbers.size, '%)')\n output = net.forward_propagation(in_testing_values)\n conf_mat = net.calculate_confusion_matrix(out_testing_values)\n output_numbers = mnist_parser.one_hots_to_ints(output)\n correct = np.sum(out_gt_numbers_test == output_numbers)\n print('Correct:', correct, '/', output_numbers.size, '(', correct /\n output_numbers.size, '%)')\n print(conf_mat)\n save = int(input('Save?(1/0)'))\n if save == 1:\n name = input('Save as?')\n net.save_state(name)\n exit = int(input('Exit?(1/0)'))\n if exit == 1:\n break\n", "<import token>\nin_values = np.load('MNIST/mnist_train_images.npy')\nout_values = np.load('MNIST/mnist_train_labels.npy')\nout_gt_numbers = mnist_parser.one_hots_to_ints(out_values)\nin_testing_values = np.load('MNIST/mnist_test_images.npy')\nout_testing_values = np.load('MNIST/mnist_test_labels.npy')\nout_gt_numbers_test = mnist_parser.one_hots_to_ints(out_testing_values)\nwhile True:\n net = Net.FeedForwardNet(input_count=784, layers=[100, 10],\n activation_function=Net.FeedForwardNet.leaky_relu)\n try:\n epoch_num = int(input('Epoch_num:'))\n batch_size = int(input('Batch_size:'))\n learning_rate = float(input('Learning rate:'))\n inertion_factor = float(input('Inertion factor:'))\n except:\n print('Parse error')\n continue\n for i in range(epoch_num):\n batch_in, batch_out = net.generate_random_batch(in_values,\n out_values, batch_size)\n net.forward_propagation(batch_in)\n net.backpropagation(batch_out, learning_rate=learning_rate,\n inertion_factor=inertion_factor)\n if i % 50 == 0:\n print()\n output = net.forward_propagation(in_testing_values)\n if net.check_total_squared_error(output_values=\n out_testing_values, epsilon=1000, verbose=True):\n break\n output_numbers = mnist_parser.one_hots_to_ints(output)\n correct = np.sum(out_gt_numbers_test == output_numbers)\n print('Epoch: ', i, ' br tocnih:', correct, '/', output_numbers\n .size, '(', correct / output_numbers.size, '%)')\n output = net.forward_propagation(in_testing_values)\n conf_mat = net.calculate_confusion_matrix(out_testing_values)\n output_numbers = mnist_parser.one_hots_to_ints(output)\n correct = np.sum(out_gt_numbers_test == output_numbers)\n print('Correct:', correct, '/', output_numbers.size, '(', correct /\n output_numbers.size, '%)')\n print(conf_mat)\n save = int(input('Save?(1/0)'))\n if save == 1:\n name = input('Save as?')\n net.save_state(name)\n exit = int(input('Exit?(1/0)'))\n if exit == 1:\n break\n", "<import token>\n<assignment token>\nwhile True:\n net = Net.FeedForwardNet(input_count=784, layers=[100, 10],\n activation_function=Net.FeedForwardNet.leaky_relu)\n try:\n epoch_num = int(input('Epoch_num:'))\n batch_size = int(input('Batch_size:'))\n learning_rate = float(input('Learning rate:'))\n inertion_factor = float(input('Inertion factor:'))\n except:\n print('Parse error')\n continue\n for i in range(epoch_num):\n batch_in, batch_out = net.generate_random_batch(in_values,\n out_values, batch_size)\n net.forward_propagation(batch_in)\n net.backpropagation(batch_out, learning_rate=learning_rate,\n inertion_factor=inertion_factor)\n if i % 50 == 0:\n print()\n output = net.forward_propagation(in_testing_values)\n if net.check_total_squared_error(output_values=\n out_testing_values, epsilon=1000, verbose=True):\n break\n output_numbers = mnist_parser.one_hots_to_ints(output)\n correct = np.sum(out_gt_numbers_test == output_numbers)\n print('Epoch: ', i, ' br tocnih:', correct, '/', output_numbers\n .size, '(', correct / output_numbers.size, '%)')\n output = net.forward_propagation(in_testing_values)\n conf_mat = net.calculate_confusion_matrix(out_testing_values)\n output_numbers = mnist_parser.one_hots_to_ints(output)\n correct = np.sum(out_gt_numbers_test == output_numbers)\n print('Correct:', correct, '/', output_numbers.size, '(', correct /\n output_numbers.size, '%)')\n print(conf_mat)\n save = int(input('Save?(1/0)'))\n if save == 1:\n name = input('Save as?')\n net.save_state(name)\n exit = int(input('Exit?(1/0)'))\n if exit == 1:\n break\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
336
219929d52b5f1a0690590e83b41d2b4f0b2b3a51
list = [3,1,2,5,4,7,6] def sort(list): for i in range(len(list)-1): if list[i] > list[i+1]: a = list[i] list[i] = list[i+1] list[i+1] = a print(list) sort(list)
[ "list = [3,1,2,5,4,7,6]\n\ndef sort(list):\n\n for i in range(len(list)-1):\n if list[i] > list[i+1]:\n a = list[i]\n list[i] = list[i+1]\n list[i+1] = a\n print(list) \n\nsort(list)", "list = [3, 1, 2, 5, 4, 7, 6]\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\nsort(list)\n", "<assignment token>\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\nsort(list)\n", "<assignment token>\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\n<code token>\n", "<assignment token>\n<function token>\n<code token>\n" ]
false
337
e884ce5878de75afe93085e2310b4b8d5953963a
''' Created on 13 Dec 2016 @author: hpcosta ''' # https://www.hackerrank.com/challenges/backreferences-to-failed-groups regex = r"^\d{2}(-?)\d{2}\1\d{2}\1\d{2}$" # Do not delete 'r'. import re print(str(bool(re.search(regex, raw_input()))).lower()) # Task # # You have a test string S. # Your task is to write a regex which will match S, with following condition(s): # # S consists of 8 digits. # S may have "-" separator such that string S gets divided in 4 parts, with each part having exactly two digits. (Eg. 12-34-56-78) # Valid # # 12345678 # 12-34-56-87 # Invalid # # 1-234-56-78 # 12-45-7810
[ "'''\nCreated on 13 Dec 2016\n\n@author: hpcosta\n'''\n# https://www.hackerrank.com/challenges/backreferences-to-failed-groups\n\nregex = r\"^\\d{2}(-?)\\d{2}\\1\\d{2}\\1\\d{2}$\" # Do not delete 'r'.\n\nimport re\n\nprint(str(bool(re.search(regex, raw_input()))).lower())\n\n\n\n# Task\n# \n# You have a test string S. \n# Your task is to write a regex which will match S, with following condition(s):\n# \n# S consists of 8 digits.\n# S may have \"-\" separator such that string S gets divided in 4 parts, with each part having exactly two digits. (Eg. 12-34-56-78)\n# Valid \n# \n# 12345678\n# 12-34-56-87\n# Invalid \n# \n# 1-234-56-78\n# 12-45-7810", "<docstring token>\nregex = '^\\\\d{2}(-?)\\\\d{2}\\\\1\\\\d{2}\\\\1\\\\d{2}$'\nimport re\nprint(str(bool(re.search(regex, raw_input()))).lower())\n", "<docstring token>\nregex = '^\\\\d{2}(-?)\\\\d{2}\\\\1\\\\d{2}\\\\1\\\\d{2}$'\n<import token>\nprint(str(bool(re.search(regex, raw_input()))).lower())\n", "<docstring token>\n<assignment token>\n<import token>\nprint(str(bool(re.search(regex, raw_input()))).lower())\n", "<docstring token>\n<assignment token>\n<import token>\n<code token>\n" ]
false
338
9951588f581c5045154a77535b36d230d586d8a5
from OpenSSL import SSL, crypto from twisted.internet import ssl, reactor from twisted.internet.protocol import Factory, Protocol import os from time import time class Echo(Protocol): def dataReceived(self, data): print "Data received: " + data # define cases options = { "generate": self.generateCertificate, "sign": self.signCertificate } tmp = data.split(';') method = tmp.pop(0) print "method is " + method #TODO: catch unknown cases # delegate case to method result = options[method](tmp) self.transport.write(result) def generateCertificate(self, userDataList): # generate a key-pair with RSA and 2048 bits pkey = crypto.PKey() pkey.generate_key(crypto.TYPE_RSA, 2048) # create a new certificate of x509 structure x509 = crypto.X509() # X509Name type subject = self.setSubject(x509.get_subject(), userDataList) #x509.set_subject(subject) # list of (name, value) tuples subComponents = subject.get_components() for (name, value) in subComponents: print name + " is " + value # cert is valid immediately x509.gmtime_adj_notBefore(0) # cert gets invalid after 10 years x509.gmtime_adj_notAfter(10*365*24*60*60) #TODO: load our CA root cert(PKCS12 type) and set subject as issuer # set issuer (CA) data x509.set_issuer(x509.get_subject()) print "Issuer set - ACTUALLY SELF-SIGNED MODE!!!" # set user public key x509.set_pubkey(pkey) #TODO: which algorithm to use? (replace with sha512) #TODO: replace key with CA private key # sign the certificate x509.sign(pkey, 'sha256') print "Certificate signed - ACTUALLY SELF-SIGNED MODE!!!" # create a new PKCS12 object pkcs12 = crypto.PKCS12() # set the new user certificate pkcs12.set_certificate(x509) # insert user private key pkcs12.set_privatekey(pkey) # create a dump of PKCS12 and return return pkcs12.export() def setSubject(self, subject, data): #subjectVariables = { # "C": subject.C, # "ST": subject.ST, # "L": subject.L, # "O": subject.O, # "OU": subject.OU, # "CN": subject.CN #} for d in data: s = d.split('=') variable = s[0] value = s[1] print "Setting variable " + variable + " to " + value + " on subject" #subjectVariables[variable] = value if variable == "C": subject.C = value elif variable == "ST": subject.ST = value elif variable == "L": subject.L = value elif variable == "O": subject.O = value elif variable == "OU": subject.OU = value elif variable == "CN": subject.CN = value return subject def signCertificate(self, certData): x509 = crypto.X509() pkcs12 = crypto.load_pkcs12(certData) req = pkcs12.get_certificate() x509.set_subject(req.get_subject()) x509.set_pubkey(req.get_pubkey()) #issuer aus Datei setzen # cert is valid immediately x509.gmtime_adj_notBefore(0) # cert gets invalid after 10 years x509.gmtime_adj_notAfter(10*365*24*60*60) x509.sign(pkey, 'sha256') pkcs12.set_certificate(x509) return pkcs12.export() def verifyCallback(connection, x509, errnum, errdepth, ok): if not ok: print 'invalid cert from subject:', x509.get_subject() return False else: print "Certs are fine", x509.get_subject() return True def getTimestamp(): return str(int(round(time() * 1000))) def addTimestamp(millis, name): print millis + '_' + name if __name__ == '__main__': factory = Factory() factory.protocol = Echo os.system("echo 'Server started...'") myContextFactory = ssl.DefaultOpenSSLContextFactory( 'keys/ca-key.pem', 'keys/ca-root.pem' ) ctx = myContextFactory.getContext() # SSL.VERIFY_PEER: Verifizierung des verwendeten SSL-Certs vorraussetzen (default=true) # VERIFY_FAIL_IF_NO_PEER_CERT: Vorgang wird abgebrochen, wenn die Verbindung ohne Zertifikat # verwendet wird (setzt obigen Parameer vorraus!) ctx.set_verify( SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, verifyCallback ) # Since we have self-signed certs we have to explicitly # tell the server to trust them. ctx.load_verify_locations("keys/ca-root.pem") reactor.listenSSL(8000, factory, myContextFactory) reactor.run()
[ "from OpenSSL import SSL, crypto\nfrom twisted.internet import ssl, reactor\nfrom twisted.internet.protocol import Factory, Protocol\n\nimport os\nfrom time import time\n\nclass Echo(Protocol):\n\n def dataReceived(self, data):\n print \"Data received: \" + data\n\n # define cases\n options = {\n \"generate\": self.generateCertificate,\n \"sign\": self.signCertificate\n }\n \n tmp = data.split(';')\n method = tmp.pop(0)\n print \"method is \" + method\n \n #TODO: catch unknown cases\n # delegate case to method\n result = options[method](tmp)\n \n self.transport.write(result)\n\n def generateCertificate(self, userDataList):\n # generate a key-pair with RSA and 2048 bits\n pkey = crypto.PKey()\n pkey.generate_key(crypto.TYPE_RSA, 2048)\n \n # create a new certificate of x509 structure\n x509 = crypto.X509()\n \n # X509Name type\n subject = self.setSubject(x509.get_subject(), userDataList)\n #x509.set_subject(subject)\n \n # list of (name, value) tuples\n subComponents = subject.get_components()\n for (name, value) in subComponents:\n print name + \" is \" + value\n \n # cert is valid immediately\n x509.gmtime_adj_notBefore(0)\n \n # cert gets invalid after 10 years\n x509.gmtime_adj_notAfter(10*365*24*60*60)\n \n #TODO: load our CA root cert(PKCS12 type) and set subject as issuer\n # set issuer (CA) data\n x509.set_issuer(x509.get_subject())\n print \"Issuer set - ACTUALLY SELF-SIGNED MODE!!!\"\n \n # set user public key\n x509.set_pubkey(pkey)\n \n #TODO: which algorithm to use? (replace with sha512)\n #TODO: replace key with CA private key\n # sign the certificate\n x509.sign(pkey, 'sha256')\n print \"Certificate signed - ACTUALLY SELF-SIGNED MODE!!!\"\n \n # create a new PKCS12 object\n pkcs12 = crypto.PKCS12()\n \n # set the new user certificate\n pkcs12.set_certificate(x509)\n \n # insert user private key\n pkcs12.set_privatekey(pkey)\n \n # create a dump of PKCS12 and return\n return pkcs12.export()\n \n def setSubject(self, subject, data):\n #subjectVariables = {\n # \"C\": subject.C,\n # \"ST\": subject.ST,\n # \"L\": subject.L,\n # \"O\": subject.O,\n # \"OU\": subject.OU,\n # \"CN\": subject.CN\n #}\n \n for d in data:\n s = d.split('=')\n variable = s[0]\n value = s[1]\n print \"Setting variable \" + variable + \" to \" + value + \" on subject\"\n #subjectVariables[variable] = value\n if variable == \"C\":\n subject.C = value\n elif variable == \"ST\":\n subject.ST = value\n elif variable == \"L\":\n subject.L = value\n elif variable == \"O\":\n subject.O = value\n elif variable == \"OU\":\n subject.OU = value\n elif variable == \"CN\":\n subject.CN = value\n \n return subject\n \n def signCertificate(self, certData):\n\n x509 = crypto.X509()\n pkcs12 = crypto.load_pkcs12(certData)\n req = pkcs12.get_certificate()\n x509.set_subject(req.get_subject())\n x509.set_pubkey(req.get_pubkey())\n\n #issuer aus Datei setzen\n\n # cert is valid immediately\n x509.gmtime_adj_notBefore(0)\n \n # cert gets invalid after 10 years\n x509.gmtime_adj_notAfter(10*365*24*60*60)\n\n x509.sign(pkey, 'sha256')\n\n pkcs12.set_certificate(x509)\n\n return pkcs12.export()\n \n\ndef verifyCallback(connection, x509, errnum, errdepth, ok):\n if not ok:\n print 'invalid cert from subject:', x509.get_subject()\n return False\n else:\n print \"Certs are fine\", x509.get_subject()\n return True\n\ndef getTimestamp():\n return str(int(round(time() * 1000)))\n\ndef addTimestamp(millis, name):\n print millis + '_' + name\n\nif __name__ == '__main__':\n factory = Factory()\n factory.protocol = Echo\n\n os.system(\"echo 'Server started...'\")\n\n myContextFactory = ssl.DefaultOpenSSLContextFactory(\n 'keys/ca-key.pem', 'keys/ca-root.pem'\n )\n\n ctx = myContextFactory.getContext()\n\n # SSL.VERIFY_PEER: Verifizierung des verwendeten SSL-Certs vorraussetzen (default=true)\n # VERIFY_FAIL_IF_NO_PEER_CERT: Vorgang wird abgebrochen, wenn die Verbindung ohne Zertifikat \n # verwendet wird (setzt obigen Parameer vorraus!)\n ctx.set_verify(\n SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,\n verifyCallback\n )\n\n # Since we have self-signed certs we have to explicitly\n # tell the server to trust them.\n ctx.load_verify_locations(\"keys/ca-root.pem\")\n\n reactor.listenSSL(8000, factory, myContextFactory)\n reactor.run()\n" ]
true
339
302accfd5001a27c7bbe6081856d43dbec704168
import asyncio import logging import random from aiogram.dispatcher import FSMContext from aiogram.types import ContentTypes, Message, CallbackQuery from aiogram.utils.exceptions import BotBlocked import keyboards from data.config import ADMINS, ADMIN_CHAT_ID from keyboards.inline.activate_menu import active_menu_callback from loader import dp, db, storage from utils import text from utils.db_api import redis_commands from utils.jobs import cur_bot_info from utils.misc import rate_limit @dp.message_handler(commands="upload", user_id=ADMINS, state="*") async def upload_profile(command_msg: Message, state: FSMContext): profile_msg = command_msg.reply_to_message admin = command_msg.from_user param = command_msg.get_args() if not profile_msg: await command_msg.answer("Чтобы загрузить анкету сделай на неё REPLY") return elif param != "g" and param != "b": await command_msg.answer("Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>") return other_bot = profile_msg.forward_from if not other_bot or other_bot.id != 1234060895: await profile_msg.reply("Загружать анкеты можно только из нашего БотаX :)") return elif (not profile_msg.photo and not profile_msg.video) or not profile_msg.caption: await profile_msg.reply("Загружать нужно именно анкету, а не части анкеты") return profile_data = text.get_parse_data(profile_msg.caption) if profile_msg.photo: media_id = profile_msg.photo[-1].file_id with_video = False else: media_id = profile_msg.video.file_id with_video = True profile_data.update( id=random.randint(1, 100000), username="f", media_id=media_id, with_video=with_video, sex=1 if param == "g" else 2 ) await db.add_user(**profile_data) await profile_msg.reply("Пользователь {}-{} успешно добавлен ✅" "".format(profile_data["user_nick"], profile_data["id"])) logging.info(f"Admin @{admin.username}-{admin.id} successfully " f"added fake {profile_data['user_nick']}-{profile_data['id']} ") @dp.message_handler(commands="get_msg_info", user_id=ADMINS, state="*") async def get_msg_info(command_msg: Message, state: FSMContext): msg = command_msg.reply_to_message await command_msg.delete() if not msg: await command_msg.answer("Нужно делать реплай на сообщение.") return state = await state.get_state() await msg.reply(f"Эхо в состоянии <code>{state}</code>.\n" f"\nСодержание сообщения:\n" f"\n<code>{msg}</code>\n" f"\ncontent_type = {msg.content_type}\n" f"\nentities={msg.entities}") @dp.message_handler(commands="ban_user", user_id=ADMINS, state="*") async def ban_user(command_msg: Message, state: FSMContext): ban_user_id = command_msg.get_args() admin = command_msg.from_user await command_msg.delete() if not ban_user_id or not ban_user_id.isdecimal(): await command_msg.answer(f"Формат команды: /ban_user user_id") return ban_user_id = int(ban_user_id) is_banned = await db.ban_user(ban_user_id) if not is_banned: await command_msg.answer(f"Пользователя с таким <user_id> не существует") return await redis_commands.ban_user(ban_user_id) await command_msg.answer("Пользователь({}) успешно забанен 😎".format(ban_user_id)) logging.info(f"Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}") @dp.message_handler(commands="unban_user", user_id=ADMINS, state="*") async def unban_user(command_msg: Message, state: FSMContext): unban_user_id = command_msg.get_args() admin = command_msg.from_user await command_msg.delete() if not unban_user_id or not unban_user_id.isdecimal(): await command_msg.answer(f"Формат команды: /unban_user user_id") return unban_user_id = int(unban_user_id) is_unbanned = await db.unban_user(unban_user_id) if not is_unbanned: await command_msg.answer(f"Пользователя с таким <user_id> не существует") return await redis_commands.unban_user(unban_user_id) await command_msg.answer("Пользователь({}) успешно разбанен 👻".format(unban_user_id)) logging.info(f"Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}") @dp.message_handler(commands="clean_old_likes", user_id=ADMINS, state="*") async def clean_old_likes(command_msg: Message, state: FSMContext): admin = command_msg.from_user await command_msg.delete() count = await db.clean_old_likes(interval=24) await command_msg.answer("Было успешно удалено {} старых лайков(за {} hours)".format(count, 24)) logging.info(f"Admin @{admin.username}-{admin.id} delete old likes(count={count})") @dp.message_handler(commands="say_to_all_now_go", user_id=ADMINS, state="*") async def say_to_all(command_msg: Message, state: FSMContext): admin = command_msg.from_user msg = command_msg.reply_to_message await command_msg.delete() if not msg: await command_msg.answer("Чтобы воспользоваться этой командой сделай REPLY") return active_user_ids = await db.get_all_users(active=True) # [375766905, 997319478] delete_bot_count = 0 for user_id in active_user_ids: try: await dp.bot.copy_message( chat_id=user_id, from_chat_id=command_msg.chat.id, message_id=msg.message_id ) await asyncio.sleep(0.05) except BotBlocked as exc: await db.update_user(user_id, active=False) await redis_commands.clear_user(user_id) await redis_commands.clear_search_ids(user_id) delete_bot_count += 1 await msg.reply("Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})" "".format(len(active_user_ids) - delete_bot_count, delete_bot_count)) logging.info(f"Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})") @dp.message_handler(commands="show_state_statistic", user_id=ADMINS, state="*") async def show_state_statistic(command_msg: Message, state: FSMContext): admin = command_msg.from_user statistic = dict() await command_msg.delete() states_list = await storage.get_states_list() for states_item in states_list: chat_id, user_id = states_item state_text = await storage.get_state(chat=chat_id, user=user_id, default="Deactivate bot") try: statistic[state_text] += 1 except KeyError: statistic.update({state_text: 1}) out_text = "<b>Статичктика по пользователям:</b>\n\n" for state_text, count_users in statistic.items(): out_text += f"В состоянии {state_text} — {count_users} пользователей\n\n" await command_msg.answer(out_text) logging.info(f"For Admin @{admin.username}-{admin.id} show state statistic") @rate_limit(3) @dp.message_handler(commands="show_info", user_id=ADMINS, state="*") async def show_info(command_msg: Message, state: FSMContext): admin = command_msg.from_user await command_msg.delete() await cur_bot_info(for_chat_id=command_msg.chat.id) logging.info(f"For admin @{admin.username}-{admin.id} SHOW INFO(command)") @dp.callback_query_handler(active_menu_callback.filter(), chat_id=ADMIN_CHAT_ID, state="*") async def change_active(call: CallbackQuery, state: FSMContext, callback_data: dict): active = not bool(int(callback_data["active"])) user_id = int(callback_data["user_id"]) admin = call.from_user profile_msg = call.message if active: await db.unban_user(user_id) await redis_commands.unban_user(user_id) else: await db.ban_user(user_id) await redis_commands.ban_user(user_id) await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(user_id=user_id, active=active)) await call.answer() logging.info(f"Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}")
[ "import asyncio\nimport logging\nimport random\n\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import ContentTypes, Message, CallbackQuery\nfrom aiogram.utils.exceptions import BotBlocked\n\nimport keyboards\nfrom data.config import ADMINS, ADMIN_CHAT_ID\nfrom keyboards.inline.activate_menu import active_menu_callback\nfrom loader import dp, db, storage\nfrom utils import text\nfrom utils.db_api import redis_commands\nfrom utils.jobs import cur_bot_info\nfrom utils.misc import rate_limit\n\n\[email protected]_handler(commands=\"upload\", user_id=ADMINS, state=\"*\")\nasync def upload_profile(command_msg: Message, state: FSMContext):\n profile_msg = command_msg.reply_to_message\n admin = command_msg.from_user\n param = command_msg.get_args()\n\n if not profile_msg:\n await command_msg.answer(\"Чтобы загрузить анкету сделай на неё REPLY\")\n return\n elif param != \"g\" and param != \"b\":\n await command_msg.answer(\"Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>\")\n return\n\n other_bot = profile_msg.forward_from\n if not other_bot or other_bot.id != 1234060895:\n await profile_msg.reply(\"Загружать анкеты можно только из нашего БотаX :)\")\n return\n elif (not profile_msg.photo and not profile_msg.video) or not profile_msg.caption:\n await profile_msg.reply(\"Загружать нужно именно анкету, а не части анкеты\")\n return\n\n profile_data = text.get_parse_data(profile_msg.caption)\n if profile_msg.photo:\n media_id = profile_msg.photo[-1].file_id\n with_video = False\n else:\n media_id = profile_msg.video.file_id\n with_video = True\n\n profile_data.update(\n id=random.randint(1, 100000),\n username=\"f\",\n media_id=media_id,\n with_video=with_video,\n sex=1 if param == \"g\" else 2\n )\n\n await db.add_user(**profile_data)\n await profile_msg.reply(\"Пользователь {}-{} успешно добавлен ✅\"\n \"\".format(profile_data[\"user_nick\"], profile_data[\"id\"]))\n logging.info(f\"Admin @{admin.username}-{admin.id} successfully \"\n f\"added fake {profile_data['user_nick']}-{profile_data['id']} \")\n\n\[email protected]_handler(commands=\"get_msg_info\", user_id=ADMINS, state=\"*\")\nasync def get_msg_info(command_msg: Message, state: FSMContext):\n msg = command_msg.reply_to_message\n\n await command_msg.delete()\n\n if not msg:\n await command_msg.answer(\"Нужно делать реплай на сообщение.\")\n return\n\n state = await state.get_state()\n await msg.reply(f\"Эхо в состоянии <code>{state}</code>.\\n\"\n f\"\\nСодержание сообщения:\\n\"\n f\"\\n<code>{msg}</code>\\n\"\n f\"\\ncontent_type = {msg.content_type}\\n\"\n f\"\\nentities={msg.entities}\")\n\n\[email protected]_handler(commands=\"ban_user\", user_id=ADMINS, state=\"*\")\nasync def ban_user(command_msg: Message, state: FSMContext):\n ban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n\n await command_msg.delete()\n\n if not ban_user_id or not ban_user_id.isdecimal():\n await command_msg.answer(f\"Формат команды: /ban_user user_id\")\n return\n ban_user_id = int(ban_user_id)\n\n is_banned = await db.ban_user(ban_user_id)\n if not is_banned:\n await command_msg.answer(f\"Пользователя с таким <user_id> не существует\")\n return\n\n await redis_commands.ban_user(ban_user_id)\n\n await command_msg.answer(\"Пользователь({}) успешно забанен 😎\".format(ban_user_id))\n\n logging.info(f\"Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}\")\n\n\[email protected]_handler(commands=\"unban_user\", user_id=ADMINS, state=\"*\")\nasync def unban_user(command_msg: Message, state: FSMContext):\n unban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n\n await command_msg.delete()\n\n if not unban_user_id or not unban_user_id.isdecimal():\n await command_msg.answer(f\"Формат команды: /unban_user user_id\")\n return\n unban_user_id = int(unban_user_id)\n\n is_unbanned = await db.unban_user(unban_user_id)\n if not is_unbanned:\n await command_msg.answer(f\"Пользователя с таким <user_id> не существует\")\n return\n\n await redis_commands.unban_user(unban_user_id)\n\n await command_msg.answer(\"Пользователь({}) успешно разбанен 👻\".format(unban_user_id))\n\n logging.info(f\"Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}\")\n\n\[email protected]_handler(commands=\"clean_old_likes\", user_id=ADMINS, state=\"*\")\nasync def clean_old_likes(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n\n await command_msg.delete()\n\n count = await db.clean_old_likes(interval=24)\n\n await command_msg.answer(\"Было успешно удалено {} старых лайков(за {} hours)\".format(count, 24))\n\n logging.info(f\"Admin @{admin.username}-{admin.id} delete old likes(count={count})\")\n\n\[email protected]_handler(commands=\"say_to_all_now_go\", user_id=ADMINS, state=\"*\")\nasync def say_to_all(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n msg = command_msg.reply_to_message\n\n await command_msg.delete()\n\n if not msg:\n await command_msg.answer(\"Чтобы воспользоваться этой командой сделай REPLY\")\n return\n\n active_user_ids = await db.get_all_users(active=True) # [375766905, 997319478]\n delete_bot_count = 0\n\n for user_id in active_user_ids:\n try:\n await dp.bot.copy_message(\n chat_id=user_id,\n from_chat_id=command_msg.chat.id,\n message_id=msg.message_id\n )\n await asyncio.sleep(0.05)\n except BotBlocked as exc:\n await db.update_user(user_id, active=False)\n await redis_commands.clear_user(user_id)\n await redis_commands.clear_search_ids(user_id)\n delete_bot_count += 1\n\n await msg.reply(\"Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})\"\n \"\".format(len(active_user_ids) - delete_bot_count, delete_bot_count))\n\n logging.info(f\"Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})\")\n\n\[email protected]_handler(commands=\"show_state_statistic\", user_id=ADMINS, state=\"*\")\nasync def show_state_statistic(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n statistic = dict()\n\n await command_msg.delete()\n\n states_list = await storage.get_states_list()\n for states_item in states_list:\n chat_id, user_id = states_item\n state_text = await storage.get_state(chat=chat_id, user=user_id, default=\"Deactivate bot\")\n try:\n statistic[state_text] += 1\n except KeyError:\n statistic.update({state_text: 1})\n\n out_text = \"<b>Статичктика по пользователям:</b>\\n\\n\"\n for state_text, count_users in statistic.items():\n out_text += f\"В состоянии {state_text} — {count_users} пользователей\\n\\n\"\n\n await command_msg.answer(out_text)\n\n logging.info(f\"For Admin @{admin.username}-{admin.id} show state statistic\")\n\n\n@rate_limit(3)\[email protected]_handler(commands=\"show_info\", user_id=ADMINS, state=\"*\")\nasync def show_info(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n\n await command_msg.delete()\n\n await cur_bot_info(for_chat_id=command_msg.chat.id)\n\n logging.info(f\"For admin @{admin.username}-{admin.id} SHOW INFO(command)\")\n\n\[email protected]_query_handler(active_menu_callback.filter(), chat_id=ADMIN_CHAT_ID, state=\"*\")\nasync def change_active(call: CallbackQuery, state: FSMContext, callback_data: dict):\n active = not bool(int(callback_data[\"active\"]))\n user_id = int(callback_data[\"user_id\"])\n admin = call.from_user\n profile_msg = call.message\n\n if active:\n await db.unban_user(user_id)\n await redis_commands.unban_user(user_id)\n else:\n await db.ban_user(user_id)\n await redis_commands.ban_user(user_id)\n\n await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(user_id=user_id, active=active))\n await call.answer()\n\n logging.info(f\"Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}\")\n", "import asyncio\nimport logging\nimport random\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import ContentTypes, Message, CallbackQuery\nfrom aiogram.utils.exceptions import BotBlocked\nimport keyboards\nfrom data.config import ADMINS, ADMIN_CHAT_ID\nfrom keyboards.inline.activate_menu import active_menu_callback\nfrom loader import dp, db, storage\nfrom utils import text\nfrom utils.db_api import redis_commands\nfrom utils.jobs import cur_bot_info\nfrom utils.misc import rate_limit\n\n\[email protected]_handler(commands='upload', user_id=ADMINS, state='*')\nasync def upload_profile(command_msg: Message, state: FSMContext):\n profile_msg = command_msg.reply_to_message\n admin = command_msg.from_user\n param = command_msg.get_args()\n if not profile_msg:\n await command_msg.answer('Чтобы загрузить анкету сделай на неё REPLY')\n return\n elif param != 'g' and param != 'b':\n await command_msg.answer(\n 'Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>'\n )\n return\n other_bot = profile_msg.forward_from\n if not other_bot or other_bot.id != 1234060895:\n await profile_msg.reply(\n 'Загружать анкеты можно только из нашего БотаX :)')\n return\n elif not profile_msg.photo and not profile_msg.video or not profile_msg.caption:\n await profile_msg.reply(\n 'Загружать нужно именно анкету, а не части анкеты')\n return\n profile_data = text.get_parse_data(profile_msg.caption)\n if profile_msg.photo:\n media_id = profile_msg.photo[-1].file_id\n with_video = False\n else:\n media_id = profile_msg.video.file_id\n with_video = True\n profile_data.update(id=random.randint(1, 100000), username='f',\n media_id=media_id, with_video=with_video, sex=1 if param == 'g' else 2)\n await db.add_user(**profile_data)\n await profile_msg.reply('Пользователь {}-{} успешно добавлен ✅'.format(\n profile_data['user_nick'], profile_data['id']))\n logging.info(\n f\"Admin @{admin.username}-{admin.id} successfully added fake {profile_data['user_nick']}-{profile_data['id']} \"\n )\n\n\[email protected]_handler(commands='get_msg_info', user_id=ADMINS, state='*')\nasync def get_msg_info(command_msg: Message, state: FSMContext):\n msg = command_msg.reply_to_message\n await command_msg.delete()\n if not msg:\n await command_msg.answer('Нужно делать реплай на сообщение.')\n return\n state = await state.get_state()\n await msg.reply(\n f\"\"\"Эхо в состоянии <code>{state}</code>.\n\nСодержание сообщения:\n\n<code>{msg}</code>\n\ncontent_type = {msg.content_type}\n\nentities={msg.entities}\"\"\"\n )\n\n\[email protected]_handler(commands='ban_user', user_id=ADMINS, state='*')\nasync def ban_user(command_msg: Message, state: FSMContext):\n ban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n await command_msg.delete()\n if not ban_user_id or not ban_user_id.isdecimal():\n await command_msg.answer(f'Формат команды: /ban_user user_id')\n return\n ban_user_id = int(ban_user_id)\n is_banned = await db.ban_user(ban_user_id)\n if not is_banned:\n await command_msg.answer(\n f'Пользователя с таким <user_id> не существует')\n return\n await redis_commands.ban_user(ban_user_id)\n await command_msg.answer('Пользователь({}) успешно забанен 😎'.format(\n ban_user_id))\n logging.info(f'Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}')\n\n\[email protected]_handler(commands='unban_user', user_id=ADMINS, state='*')\nasync def unban_user(command_msg: Message, state: FSMContext):\n unban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n await command_msg.delete()\n if not unban_user_id or not unban_user_id.isdecimal():\n await command_msg.answer(f'Формат команды: /unban_user user_id')\n return\n unban_user_id = int(unban_user_id)\n is_unbanned = await db.unban_user(unban_user_id)\n if not is_unbanned:\n await command_msg.answer(\n f'Пользователя с таким <user_id> не существует')\n return\n await redis_commands.unban_user(unban_user_id)\n await command_msg.answer('Пользователь({}) успешно разбанен 👻'.format(\n unban_user_id))\n logging.info(\n f'Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}')\n\n\[email protected]_handler(commands='clean_old_likes', user_id=ADMINS, state='*')\nasync def clean_old_likes(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n await command_msg.delete()\n count = await db.clean_old_likes(interval=24)\n await command_msg.answer(\n 'Было успешно удалено {} старых лайков(за {} hours)'.format(count, 24))\n logging.info(\n f'Admin @{admin.username}-{admin.id} delete old likes(count={count})')\n\n\[email protected]_handler(commands='say_to_all_now_go', user_id=ADMINS, state='*')\nasync def say_to_all(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n msg = command_msg.reply_to_message\n await command_msg.delete()\n if not msg:\n await command_msg.answer(\n 'Чтобы воспользоваться этой командой сделай REPLY')\n return\n active_user_ids = await db.get_all_users(active=True)\n delete_bot_count = 0\n for user_id in active_user_ids:\n try:\n await dp.bot.copy_message(chat_id=user_id, from_chat_id=\n command_msg.chat.id, message_id=msg.message_id)\n await asyncio.sleep(0.05)\n except BotBlocked as exc:\n await db.update_user(user_id, active=False)\n await redis_commands.clear_user(user_id)\n await redis_commands.clear_search_ids(user_id)\n delete_bot_count += 1\n await msg.reply(\n 'Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})'\n .format(len(active_user_ids) - delete_bot_count, delete_bot_count))\n logging.info(\n f'Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})'\n )\n\n\[email protected]_handler(commands='show_state_statistic', user_id=ADMINS, state='*')\nasync def show_state_statistic(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n statistic = dict()\n await command_msg.delete()\n states_list = await storage.get_states_list()\n for states_item in states_list:\n chat_id, user_id = states_item\n state_text = await storage.get_state(chat=chat_id, user=user_id,\n default='Deactivate bot')\n try:\n statistic[state_text] += 1\n except KeyError:\n statistic.update({state_text: 1})\n out_text = '<b>Статичктика по пользователям:</b>\\n\\n'\n for state_text, count_users in statistic.items():\n out_text += (\n f'В состоянии {state_text} — {count_users} пользователей\\n\\n')\n await command_msg.answer(out_text)\n logging.info(f'For Admin @{admin.username}-{admin.id} show state statistic'\n )\n\n\n@rate_limit(3)\[email protected]_handler(commands='show_info', user_id=ADMINS, state='*')\nasync def show_info(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n await command_msg.delete()\n await cur_bot_info(for_chat_id=command_msg.chat.id)\n logging.info(f'For admin @{admin.username}-{admin.id} SHOW INFO(command)')\n\n\[email protected]_query_handler(active_menu_callback.filter(), chat_id=\n ADMIN_CHAT_ID, state='*')\nasync def change_active(call: CallbackQuery, state: FSMContext,\n callback_data: dict):\n active = not bool(int(callback_data['active']))\n user_id = int(callback_data['user_id'])\n admin = call.from_user\n profile_msg = call.message\n if active:\n await db.unban_user(user_id)\n await redis_commands.unban_user(user_id)\n else:\n await db.ban_user(user_id)\n await redis_commands.ban_user(user_id)\n await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(\n user_id=user_id, active=active))\n await call.answer()\n logging.info(\n f'Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}'\n )\n", "<import token>\n\n\[email protected]_handler(commands='upload', user_id=ADMINS, state='*')\nasync def upload_profile(command_msg: Message, state: FSMContext):\n profile_msg = command_msg.reply_to_message\n admin = command_msg.from_user\n param = command_msg.get_args()\n if not profile_msg:\n await command_msg.answer('Чтобы загрузить анкету сделай на неё REPLY')\n return\n elif param != 'g' and param != 'b':\n await command_msg.answer(\n 'Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>'\n )\n return\n other_bot = profile_msg.forward_from\n if not other_bot or other_bot.id != 1234060895:\n await profile_msg.reply(\n 'Загружать анкеты можно только из нашего БотаX :)')\n return\n elif not profile_msg.photo and not profile_msg.video or not profile_msg.caption:\n await profile_msg.reply(\n 'Загружать нужно именно анкету, а не части анкеты')\n return\n profile_data = text.get_parse_data(profile_msg.caption)\n if profile_msg.photo:\n media_id = profile_msg.photo[-1].file_id\n with_video = False\n else:\n media_id = profile_msg.video.file_id\n with_video = True\n profile_data.update(id=random.randint(1, 100000), username='f',\n media_id=media_id, with_video=with_video, sex=1 if param == 'g' else 2)\n await db.add_user(**profile_data)\n await profile_msg.reply('Пользователь {}-{} успешно добавлен ✅'.format(\n profile_data['user_nick'], profile_data['id']))\n logging.info(\n f\"Admin @{admin.username}-{admin.id} successfully added fake {profile_data['user_nick']}-{profile_data['id']} \"\n )\n\n\[email protected]_handler(commands='get_msg_info', user_id=ADMINS, state='*')\nasync def get_msg_info(command_msg: Message, state: FSMContext):\n msg = command_msg.reply_to_message\n await command_msg.delete()\n if not msg:\n await command_msg.answer('Нужно делать реплай на сообщение.')\n return\n state = await state.get_state()\n await msg.reply(\n f\"\"\"Эхо в состоянии <code>{state}</code>.\n\nСодержание сообщения:\n\n<code>{msg}</code>\n\ncontent_type = {msg.content_type}\n\nentities={msg.entities}\"\"\"\n )\n\n\[email protected]_handler(commands='ban_user', user_id=ADMINS, state='*')\nasync def ban_user(command_msg: Message, state: FSMContext):\n ban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n await command_msg.delete()\n if not ban_user_id or not ban_user_id.isdecimal():\n await command_msg.answer(f'Формат команды: /ban_user user_id')\n return\n ban_user_id = int(ban_user_id)\n is_banned = await db.ban_user(ban_user_id)\n if not is_banned:\n await command_msg.answer(\n f'Пользователя с таким <user_id> не существует')\n return\n await redis_commands.ban_user(ban_user_id)\n await command_msg.answer('Пользователь({}) успешно забанен 😎'.format(\n ban_user_id))\n logging.info(f'Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}')\n\n\[email protected]_handler(commands='unban_user', user_id=ADMINS, state='*')\nasync def unban_user(command_msg: Message, state: FSMContext):\n unban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n await command_msg.delete()\n if not unban_user_id or not unban_user_id.isdecimal():\n await command_msg.answer(f'Формат команды: /unban_user user_id')\n return\n unban_user_id = int(unban_user_id)\n is_unbanned = await db.unban_user(unban_user_id)\n if not is_unbanned:\n await command_msg.answer(\n f'Пользователя с таким <user_id> не существует')\n return\n await redis_commands.unban_user(unban_user_id)\n await command_msg.answer('Пользователь({}) успешно разбанен 👻'.format(\n unban_user_id))\n logging.info(\n f'Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}')\n\n\[email protected]_handler(commands='clean_old_likes', user_id=ADMINS, state='*')\nasync def clean_old_likes(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n await command_msg.delete()\n count = await db.clean_old_likes(interval=24)\n await command_msg.answer(\n 'Было успешно удалено {} старых лайков(за {} hours)'.format(count, 24))\n logging.info(\n f'Admin @{admin.username}-{admin.id} delete old likes(count={count})')\n\n\[email protected]_handler(commands='say_to_all_now_go', user_id=ADMINS, state='*')\nasync def say_to_all(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n msg = command_msg.reply_to_message\n await command_msg.delete()\n if not msg:\n await command_msg.answer(\n 'Чтобы воспользоваться этой командой сделай REPLY')\n return\n active_user_ids = await db.get_all_users(active=True)\n delete_bot_count = 0\n for user_id in active_user_ids:\n try:\n await dp.bot.copy_message(chat_id=user_id, from_chat_id=\n command_msg.chat.id, message_id=msg.message_id)\n await asyncio.sleep(0.05)\n except BotBlocked as exc:\n await db.update_user(user_id, active=False)\n await redis_commands.clear_user(user_id)\n await redis_commands.clear_search_ids(user_id)\n delete_bot_count += 1\n await msg.reply(\n 'Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})'\n .format(len(active_user_ids) - delete_bot_count, delete_bot_count))\n logging.info(\n f'Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})'\n )\n\n\[email protected]_handler(commands='show_state_statistic', user_id=ADMINS, state='*')\nasync def show_state_statistic(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n statistic = dict()\n await command_msg.delete()\n states_list = await storage.get_states_list()\n for states_item in states_list:\n chat_id, user_id = states_item\n state_text = await storage.get_state(chat=chat_id, user=user_id,\n default='Deactivate bot')\n try:\n statistic[state_text] += 1\n except KeyError:\n statistic.update({state_text: 1})\n out_text = '<b>Статичктика по пользователям:</b>\\n\\n'\n for state_text, count_users in statistic.items():\n out_text += (\n f'В состоянии {state_text} — {count_users} пользователей\\n\\n')\n await command_msg.answer(out_text)\n logging.info(f'For Admin @{admin.username}-{admin.id} show state statistic'\n )\n\n\n@rate_limit(3)\[email protected]_handler(commands='show_info', user_id=ADMINS, state='*')\nasync def show_info(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n await command_msg.delete()\n await cur_bot_info(for_chat_id=command_msg.chat.id)\n logging.info(f'For admin @{admin.username}-{admin.id} SHOW INFO(command)')\n\n\[email protected]_query_handler(active_menu_callback.filter(), chat_id=\n ADMIN_CHAT_ID, state='*')\nasync def change_active(call: CallbackQuery, state: FSMContext,\n callback_data: dict):\n active = not bool(int(callback_data['active']))\n user_id = int(callback_data['user_id'])\n admin = call.from_user\n profile_msg = call.message\n if active:\n await db.unban_user(user_id)\n await redis_commands.unban_user(user_id)\n else:\n await db.ban_user(user_id)\n await redis_commands.ban_user(user_id)\n await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(\n user_id=user_id, active=active))\n await call.answer()\n logging.info(\n f'Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}'\n )\n", "<import token>\n<code token>\n" ]
false
340
de925b8f6bd31bfdfd1f04628659847b0761899d
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' @author: Allen(Zifeng) An @course: @contact: [email protected] @file: 17. Letter Combinations of a Phone Number.py @time: 2020/2/2 21:18 ''' from typing import List class Solution: def letterCombinations(self, digits: str) -> List[str]: d={2:'abc', 3:'def', 4:'ghi', 5:'jkl', 6:'mno', 7:'pqrs', 8:'tuv', 9:'wxyz' } def merge(body,digits): if len(digits)==0: ans.append(body) return else: for c in d[int(digits[0])]: merge(body+c,digits[1:]) # arr=[] ans=[] # for digit in digits: # arr.append(list(d[int(digit)])) # print(arr) merge('',digits) return ans if len(ans)!=1 else [] print(Solution().letterCombinations('')) # # class Solution: # def letterCombinations(self, digits: str) -> List[str]: # d={2:'abc', # 3:'def', # 4:'ghi', # 5:'jkl', # 6:'mno', # 7:'pqrs', # 8:'tuv', # 9:'wxyz' # } # # cmb=[''] if len(digits)!=0 else [] # # for digit in digits: # cmb=[p+q for p in cmb for q in d[int(digit)]] # # return cmb # print(Solution().letterCombinations('23'))
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@author: Allen(Zifeng) An\n@course: \n@contact: [email protected]\n@file: 17. Letter Combinations of a Phone Number.py\n@time: 2020/2/2 21:18\n'''\nfrom typing import List\n\n\nclass Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n d={2:'abc',\n 3:'def',\n 4:'ghi',\n 5:'jkl',\n 6:'mno',\n 7:'pqrs',\n 8:'tuv',\n 9:'wxyz'\n }\n\n def merge(body,digits):\n\n if len(digits)==0:\n ans.append(body)\n return\n else:\n for c in d[int(digits[0])]:\n merge(body+c,digits[1:])\n\n # arr=[]\n ans=[]\n # for digit in digits:\n # arr.append(list(d[int(digit)]))\n # print(arr)\n merge('',digits)\n return ans if len(ans)!=1 else []\n\nprint(Solution().letterCombinations(''))\n\n#\n# class Solution:\n# def letterCombinations(self, digits: str) -> List[str]:\n# d={2:'abc',\n# 3:'def',\n# 4:'ghi',\n# 5:'jkl',\n# 6:'mno',\n# 7:'pqrs',\n# 8:'tuv',\n# 9:'wxyz'\n# }\n#\n# cmb=[''] if len(digits)!=0 else []\n#\n# for digit in digits:\n# cmb=[p+q for p in cmb for q in d[int(digit)]]\n#\n# return cmb\n# print(Solution().letterCombinations('23'))\n\n", "<docstring token>\nfrom typing import List\n\n\nclass Solution:\n\n def letterCombinations(self, digits: str) ->List[str]:\n d = {(2): 'abc', (3): 'def', (4): 'ghi', (5): 'jkl', (6): 'mno', (7\n ): 'pqrs', (8): 'tuv', (9): 'wxyz'}\n\n def merge(body, digits):\n if len(digits) == 0:\n ans.append(body)\n return\n else:\n for c in d[int(digits[0])]:\n merge(body + c, digits[1:])\n ans = []\n merge('', digits)\n return ans if len(ans) != 1 else []\n\n\nprint(Solution().letterCombinations(''))\n", "<docstring token>\n<import token>\n\n\nclass Solution:\n\n def letterCombinations(self, digits: str) ->List[str]:\n d = {(2): 'abc', (3): 'def', (4): 'ghi', (5): 'jkl', (6): 'mno', (7\n ): 'pqrs', (8): 'tuv', (9): 'wxyz'}\n\n def merge(body, digits):\n if len(digits) == 0:\n ans.append(body)\n return\n else:\n for c in d[int(digits[0])]:\n merge(body + c, digits[1:])\n ans = []\n merge('', digits)\n return ans if len(ans) != 1 else []\n\n\nprint(Solution().letterCombinations(''))\n", "<docstring token>\n<import token>\n\n\nclass Solution:\n\n def letterCombinations(self, digits: str) ->List[str]:\n d = {(2): 'abc', (3): 'def', (4): 'ghi', (5): 'jkl', (6): 'mno', (7\n ): 'pqrs', (8): 'tuv', (9): 'wxyz'}\n\n def merge(body, digits):\n if len(digits) == 0:\n ans.append(body)\n return\n else:\n for c in d[int(digits[0])]:\n merge(body + c, digits[1:])\n ans = []\n merge('', digits)\n return ans if len(ans) != 1 else []\n\n\n<code token>\n", "<docstring token>\n<import token>\n\n\nclass Solution:\n <function token>\n\n\n<code token>\n", "<docstring token>\n<import token>\n<class token>\n<code token>\n" ]
false
341
6531833a4fe57c15c0668cee9015c7d43491427a
/home/openerp/production/extra-addons/productivity_analysis/report/productivity_analysis.py
[ "/home/openerp/production/extra-addons/productivity_analysis/report/productivity_analysis.py" ]
true
342
b3d9013ab6facb8dd9361e2a0715a8ed0cdfeaba
from setuptools import setup import imp def get_version(): ver_file = None try: ver_file, pathname, description = imp.find_module('__version__', ['cmakelint']) vermod = imp.load_module('__version__', ver_file, pathname, description) version = vermod.VERSION return version finally: if ver_file is not None: ver_file.close() setup(name='cmakelint', version=get_version(), packages=['cmakelint'], scripts=['bin/cmakelint'], entry_points={ 'console_scripts': [ 'cmakelint = cmakelint.main:main' ] }, install_requires=[''], author="Richard Quirk", author_email="[email protected]", url="https://github.com/richq/cmake-lint", download_url="https://github.com/richq/cmake-lint", keywords=["cmake", "lint"], classifiers=[ "Topic :: Software Development", "Programming Language :: Other", "Programming Language :: Python", "License :: OSI Approved :: Apache Software License"], description="Static code checker for CMake files", long_description="""cmakelint parses CMake files and reports style issues.""", license="Apache 2.0")
[ "from setuptools import setup\n\nimport imp\n\n\ndef get_version():\n ver_file = None\n try:\n ver_file, pathname, description = imp.find_module('__version__', ['cmakelint'])\n vermod = imp.load_module('__version__', ver_file, pathname, description)\n version = vermod.VERSION\n return version\n finally:\n if ver_file is not None:\n ver_file.close()\n\n\nsetup(name='cmakelint',\n version=get_version(),\n packages=['cmakelint'],\n scripts=['bin/cmakelint'],\n entry_points={\n 'console_scripts': [\n 'cmakelint = cmakelint.main:main'\n ]\n },\n install_requires=[''],\n author=\"Richard Quirk\",\n author_email=\"[email protected]\",\n url=\"https://github.com/richq/cmake-lint\",\n download_url=\"https://github.com/richq/cmake-lint\",\n keywords=[\"cmake\", \"lint\"],\n classifiers=[\n \"Topic :: Software Development\",\n \"Programming Language :: Other\",\n \"Programming Language :: Python\",\n \"License :: OSI Approved :: Apache Software License\"],\n description=\"Static code checker for CMake files\",\n long_description=\"\"\"cmakelint parses CMake files and reports style issues.\"\"\",\n license=\"Apache 2.0\")\n", "from setuptools import setup\nimport imp\n\n\ndef get_version():\n ver_file = None\n try:\n ver_file, pathname, description = imp.find_module('__version__', [\n 'cmakelint'])\n vermod = imp.load_module('__version__', ver_file, pathname, description\n )\n version = vermod.VERSION\n return version\n finally:\n if ver_file is not None:\n ver_file.close()\n\n\nsetup(name='cmakelint', version=get_version(), packages=['cmakelint'],\n scripts=['bin/cmakelint'], entry_points={'console_scripts': [\n 'cmakelint = cmakelint.main:main']}, install_requires=[''], author=\n 'Richard Quirk', author_email='[email protected]', url=\n 'https://github.com/richq/cmake-lint', download_url=\n 'https://github.com/richq/cmake-lint', keywords=['cmake', 'lint'],\n classifiers=['Topic :: Software Development',\n 'Programming Language :: Other', 'Programming Language :: Python',\n 'License :: OSI Approved :: Apache Software License'], description=\n 'Static code checker for CMake files', long_description=\n 'cmakelint parses CMake files and reports style issues.', license=\n 'Apache 2.0')\n", "<import token>\n\n\ndef get_version():\n ver_file = None\n try:\n ver_file, pathname, description = imp.find_module('__version__', [\n 'cmakelint'])\n vermod = imp.load_module('__version__', ver_file, pathname, description\n )\n version = vermod.VERSION\n return version\n finally:\n if ver_file is not None:\n ver_file.close()\n\n\nsetup(name='cmakelint', version=get_version(), packages=['cmakelint'],\n scripts=['bin/cmakelint'], entry_points={'console_scripts': [\n 'cmakelint = cmakelint.main:main']}, install_requires=[''], author=\n 'Richard Quirk', author_email='[email protected]', url=\n 'https://github.com/richq/cmake-lint', download_url=\n 'https://github.com/richq/cmake-lint', keywords=['cmake', 'lint'],\n classifiers=['Topic :: Software Development',\n 'Programming Language :: Other', 'Programming Language :: Python',\n 'License :: OSI Approved :: Apache Software License'], description=\n 'Static code checker for CMake files', long_description=\n 'cmakelint parses CMake files and reports style issues.', license=\n 'Apache 2.0')\n", "<import token>\n\n\ndef get_version():\n ver_file = None\n try:\n ver_file, pathname, description = imp.find_module('__version__', [\n 'cmakelint'])\n vermod = imp.load_module('__version__', ver_file, pathname, description\n )\n version = vermod.VERSION\n return version\n finally:\n if ver_file is not None:\n ver_file.close()\n\n\n<code token>\n", "<import token>\n<function token>\n<code token>\n" ]
false
343
359db73de2c2bb5967723dfb78f98fb84b337b9d
from math import degrees, sqrt, sin, cos, atan, radians from pygame import Surface, draw from pygame.sprite import Sprite from constants import ARROW_MAX_SPEED, FLOOR_Y from game_types import Radian, Degree class Arrow(Sprite): def __init__(self, color, screen, character, click_position): Sprite.__init__(self) self.color = color self.screen = screen self.character = character self.click_position = click_position width, height = 2, 2 self.image = Surface([width, height]) self.image.fill(color) draw.rect(self.image, color, [0, 0, width, height]) self.rect = self.image.get_rect() self.released = False self.release_speed = None self.release_position = None self.angle: Degree = None self.t = 0 self.is_moving_right = False self.stopped = False def set_center(self): if self.released: return x, y = self.character.get_center() self.rect.x = x self.rect.y = y def get_center(self): return self.rect.center def update(self): if self.rect.y >= FLOOR_Y: self.stopped = True if not self.released: return speed = self.release_speed t = self.t g = 0.980 vx = cos(self.angle) * speed vy = -sin(self.angle) * speed + 0.5 * g * t * t # print("angle {} -sin(angle) {:0<25} 0.5 * g * t * t {:0<25}".format(self.angle, -sin(self.angle), 0.5*g*t*t)) self.rect.x += vx self.rect.y += vy self.t += 0.1 def release(self, release_position): if self.released: return self.released = True click_x, click_y = self.click_position self.release_position = release_x, release_y = release_position if release_x < click_x: self.is_moving_right = True adjacent, opposite = self.get_catheuses() angle: Degree = self.get_release_angle(adjacent, opposite) aiming_down = release_y < click_y self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down, self.is_moving_right) length = self.get_line_length(adjacent, opposite) self.release_speed = self.get_release_speed(length) def get_catheuses(self): click_x, click_y = self.click_position release_x, release_y = self.release_position adjacent = abs(release_x - click_x) opposite = abs(release_y - click_y) return adjacent, opposite def get_release_angle(self, adjacent: float, opposite: float) -> Degree: if adjacent == 0: return 90 return degrees(atan(opposite / adjacent)) def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down: bool, is_moving_right: bool) -> Radian: adjustment: Degree = 0 if is_moving_right: if aiming_down: adjustment = 180 + 2 * (90 - angle) else: if aiming_down: adjustment = 180 else: adjustment = 2 * (90 - angle) return radians(angle + adjustment) def get_line_length(self, adjacent, opposite) -> float: return sqrt(adjacent ** 2 + opposite ** 2) def get_release_speed(self, length) -> float: if length > 100: return ARROW_MAX_SPEED # Ex.: 80 / 100 = 80% * ARROW_MAX_SPEED return ARROW_MAX_SPEED * (length / 100) def hit(self): self.stopped = True
[ "from math import degrees, sqrt, sin, cos, atan, radians\r\nfrom pygame import Surface, draw\r\nfrom pygame.sprite import Sprite\r\n\r\nfrom constants import ARROW_MAX_SPEED, FLOOR_Y\r\nfrom game_types import Radian, Degree\r\n\r\n\r\nclass Arrow(Sprite):\r\n def __init__(self, color, screen, character, click_position):\r\n Sprite.__init__(self)\r\n self.color = color\r\n self.screen = screen\r\n self.character = character\r\n self.click_position = click_position\r\n\r\n width, height = 2, 2\r\n self.image = Surface([width, height])\r\n self.image.fill(color)\r\n draw.rect(self.image, color, [0, 0, width, height])\r\n self.rect = self.image.get_rect()\r\n self.released = False\r\n self.release_speed = None\r\n self.release_position = None\r\n self.angle: Degree = None\r\n self.t = 0\r\n self.is_moving_right = False\r\n self.stopped = False\r\n\r\n def set_center(self):\r\n if self.released:\r\n return\r\n x, y = self.character.get_center()\r\n self.rect.x = x\r\n self.rect.y = y\r\n\r\n def get_center(self):\r\n return self.rect.center\r\n\r\n def update(self):\r\n if self.rect.y >= FLOOR_Y:\r\n self.stopped = True\r\n if not self.released:\r\n return\r\n speed = self.release_speed\r\n t = self.t\r\n g = 0.980\r\n\r\n vx = cos(self.angle) * speed\r\n vy = -sin(self.angle) * speed + 0.5 * g * t * t\r\n # print(\"angle {} -sin(angle) {:0<25} 0.5 * g * t * t {:0<25}\".format(self.angle, -sin(self.angle), 0.5*g*t*t))\r\n\r\n self.rect.x += vx\r\n self.rect.y += vy\r\n self.t += 0.1\r\n\r\n def release(self, release_position):\r\n if self.released:\r\n return\r\n self.released = True\r\n click_x, click_y = self.click_position\r\n self.release_position = release_x, release_y = release_position\r\n if release_x < click_x:\r\n self.is_moving_right = True\r\n adjacent, opposite = self.get_catheuses()\r\n angle: Degree = self.get_release_angle(adjacent, opposite)\r\n aiming_down = release_y < click_y\r\n self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down, self.is_moving_right)\r\n length = self.get_line_length(adjacent, opposite)\r\n self.release_speed = self.get_release_speed(length)\r\n\r\n def get_catheuses(self):\r\n click_x, click_y = self.click_position\r\n release_x, release_y = self.release_position\r\n adjacent = abs(release_x - click_x)\r\n opposite = abs(release_y - click_y)\r\n return adjacent, opposite\r\n\r\n def get_release_angle(self, adjacent: float, opposite: float) -> Degree:\r\n if adjacent == 0:\r\n return 90\r\n return degrees(atan(opposite / adjacent))\r\n\r\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down: bool, is_moving_right: bool) -> Radian:\r\n adjustment: Degree = 0\r\n if is_moving_right:\r\n if aiming_down:\r\n adjustment = 180 + 2 * (90 - angle)\r\n else:\r\n if aiming_down:\r\n adjustment = 180\r\n else:\r\n adjustment = 2 * (90 - angle)\r\n return radians(angle + adjustment)\r\n\r\n def get_line_length(self, adjacent, opposite) -> float:\r\n return sqrt(adjacent ** 2 + opposite ** 2)\r\n\r\n def get_release_speed(self, length) -> float:\r\n if length > 100:\r\n return ARROW_MAX_SPEED\r\n # Ex.: 80 / 100 = 80% * ARROW_MAX_SPEED\r\n return ARROW_MAX_SPEED * (length / 100)\r\n\r\n def hit(self):\r\n self.stopped = True\r\n", "from math import degrees, sqrt, sin, cos, atan, radians\nfrom pygame import Surface, draw\nfrom pygame.sprite import Sprite\nfrom constants import ARROW_MAX_SPEED, FLOOR_Y\nfrom game_types import Radian, Degree\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n\n def get_center(self):\n return self.rect.center\n\n def update(self):\n if self.rect.y >= FLOOR_Y:\n self.stopped = True\n if not self.released:\n return\n speed = self.release_speed\n t = self.t\n g = 0.98\n vx = cos(self.angle) * speed\n vy = -sin(self.angle) * speed + 0.5 * g * t * t\n self.rect.x += vx\n self.rect.y += vy\n self.t += 0.1\n\n def release(self, release_position):\n if self.released:\n return\n self.released = True\n click_x, click_y = self.click_position\n self.release_position = release_x, release_y = release_position\n if release_x < click_x:\n self.is_moving_right = True\n adjacent, opposite = self.get_catheuses()\n angle: Degree = self.get_release_angle(adjacent, opposite)\n aiming_down = release_y < click_y\n self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down,\n self.is_moving_right)\n length = self.get_line_length(adjacent, opposite)\n self.release_speed = self.get_release_speed(length)\n\n def get_catheuses(self):\n click_x, click_y = self.click_position\n release_x, release_y = self.release_position\n adjacent = abs(release_x - click_x)\n opposite = abs(release_y - click_y)\n return adjacent, opposite\n\n def get_release_angle(self, adjacent: float, opposite: float) ->Degree:\n if adjacent == 0:\n return 90\n return degrees(atan(opposite / adjacent))\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n\n def get_release_speed(self, length) ->float:\n if length > 100:\n return ARROW_MAX_SPEED\n return ARROW_MAX_SPEED * (length / 100)\n\n def hit(self):\n self.stopped = True\n", "<import token>\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n\n def get_center(self):\n return self.rect.center\n\n def update(self):\n if self.rect.y >= FLOOR_Y:\n self.stopped = True\n if not self.released:\n return\n speed = self.release_speed\n t = self.t\n g = 0.98\n vx = cos(self.angle) * speed\n vy = -sin(self.angle) * speed + 0.5 * g * t * t\n self.rect.x += vx\n self.rect.y += vy\n self.t += 0.1\n\n def release(self, release_position):\n if self.released:\n return\n self.released = True\n click_x, click_y = self.click_position\n self.release_position = release_x, release_y = release_position\n if release_x < click_x:\n self.is_moving_right = True\n adjacent, opposite = self.get_catheuses()\n angle: Degree = self.get_release_angle(adjacent, opposite)\n aiming_down = release_y < click_y\n self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down,\n self.is_moving_right)\n length = self.get_line_length(adjacent, opposite)\n self.release_speed = self.get_release_speed(length)\n\n def get_catheuses(self):\n click_x, click_y = self.click_position\n release_x, release_y = self.release_position\n adjacent = abs(release_x - click_x)\n opposite = abs(release_y - click_y)\n return adjacent, opposite\n\n def get_release_angle(self, adjacent: float, opposite: float) ->Degree:\n if adjacent == 0:\n return 90\n return degrees(atan(opposite / adjacent))\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n\n def get_release_speed(self, length) ->float:\n if length > 100:\n return ARROW_MAX_SPEED\n return ARROW_MAX_SPEED * (length / 100)\n\n def hit(self):\n self.stopped = True\n", "<import token>\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n\n def get_center(self):\n return self.rect.center\n\n def update(self):\n if self.rect.y >= FLOOR_Y:\n self.stopped = True\n if not self.released:\n return\n speed = self.release_speed\n t = self.t\n g = 0.98\n vx = cos(self.angle) * speed\n vy = -sin(self.angle) * speed + 0.5 * g * t * t\n self.rect.x += vx\n self.rect.y += vy\n self.t += 0.1\n\n def release(self, release_position):\n if self.released:\n return\n self.released = True\n click_x, click_y = self.click_position\n self.release_position = release_x, release_y = release_position\n if release_x < click_x:\n self.is_moving_right = True\n adjacent, opposite = self.get_catheuses()\n angle: Degree = self.get_release_angle(adjacent, opposite)\n aiming_down = release_y < click_y\n self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down,\n self.is_moving_right)\n length = self.get_line_length(adjacent, opposite)\n self.release_speed = self.get_release_speed(length)\n\n def get_catheuses(self):\n click_x, click_y = self.click_position\n release_x, release_y = self.release_position\n adjacent = abs(release_x - click_x)\n opposite = abs(release_y - click_y)\n return adjacent, opposite\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n\n def get_release_speed(self, length) ->float:\n if length > 100:\n return ARROW_MAX_SPEED\n return ARROW_MAX_SPEED * (length / 100)\n\n def hit(self):\n self.stopped = True\n", "<import token>\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n <function token>\n\n def update(self):\n if self.rect.y >= FLOOR_Y:\n self.stopped = True\n if not self.released:\n return\n speed = self.release_speed\n t = self.t\n g = 0.98\n vx = cos(self.angle) * speed\n vy = -sin(self.angle) * speed + 0.5 * g * t * t\n self.rect.x += vx\n self.rect.y += vy\n self.t += 0.1\n\n def release(self, release_position):\n if self.released:\n return\n self.released = True\n click_x, click_y = self.click_position\n self.release_position = release_x, release_y = release_position\n if release_x < click_x:\n self.is_moving_right = True\n adjacent, opposite = self.get_catheuses()\n angle: Degree = self.get_release_angle(adjacent, opposite)\n aiming_down = release_y < click_y\n self.angle = self.adjust_angle_to_aim_direction(angle, aiming_down,\n self.is_moving_right)\n length = self.get_line_length(adjacent, opposite)\n self.release_speed = self.get_release_speed(length)\n\n def get_catheuses(self):\n click_x, click_y = self.click_position\n release_x, release_y = self.release_position\n adjacent = abs(release_x - click_x)\n opposite = abs(release_y - click_y)\n return adjacent, opposite\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n\n def get_release_speed(self, length) ->float:\n if length > 100:\n return ARROW_MAX_SPEED\n return ARROW_MAX_SPEED * (length / 100)\n\n def hit(self):\n self.stopped = True\n", "<import token>\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n <function token>\n\n def update(self):\n if self.rect.y >= FLOOR_Y:\n self.stopped = True\n if not self.released:\n return\n speed = self.release_speed\n t = self.t\n g = 0.98\n vx = cos(self.angle) * speed\n vy = -sin(self.angle) * speed + 0.5 * g * t * t\n self.rect.x += vx\n self.rect.y += vy\n self.t += 0.1\n <function token>\n\n def get_catheuses(self):\n click_x, click_y = self.click_position\n release_x, release_y = self.release_position\n adjacent = abs(release_x - click_x)\n opposite = abs(release_y - click_y)\n return adjacent, opposite\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n\n def get_release_speed(self, length) ->float:\n if length > 100:\n return ARROW_MAX_SPEED\n return ARROW_MAX_SPEED * (length / 100)\n\n def hit(self):\n self.stopped = True\n", "<import token>\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n <function token>\n\n def update(self):\n if self.rect.y >= FLOOR_Y:\n self.stopped = True\n if not self.released:\n return\n speed = self.release_speed\n t = self.t\n g = 0.98\n vx = cos(self.angle) * speed\n vy = -sin(self.angle) * speed + 0.5 * g * t * t\n self.rect.x += vx\n self.rect.y += vy\n self.t += 0.1\n <function token>\n\n def get_catheuses(self):\n click_x, click_y = self.click_position\n release_x, release_y = self.release_position\n adjacent = abs(release_x - click_x)\n opposite = abs(release_y - click_y)\n return adjacent, opposite\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n <function token>\n\n def hit(self):\n self.stopped = True\n", "<import token>\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n <function token>\n\n def update(self):\n if self.rect.y >= FLOOR_Y:\n self.stopped = True\n if not self.released:\n return\n speed = self.release_speed\n t = self.t\n g = 0.98\n vx = cos(self.angle) * speed\n vy = -sin(self.angle) * speed + 0.5 * g * t * t\n self.rect.x += vx\n self.rect.y += vy\n self.t += 0.1\n <function token>\n\n def get_catheuses(self):\n click_x, click_y = self.click_position\n release_x, release_y = self.release_position\n adjacent = abs(release_x - click_x)\n opposite = abs(release_y - click_y)\n return adjacent, opposite\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n <function token>\n <function token>\n", "<import token>\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n <function token>\n <function token>\n <function token>\n\n def get_catheuses(self):\n click_x, click_y = self.click_position\n release_x, release_y = self.release_position\n adjacent = abs(release_x - click_x)\n opposite = abs(release_y - click_y)\n return adjacent, opposite\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n <function token>\n <function token>\n", "<import token>\n\n\nclass Arrow(Sprite):\n\n def __init__(self, color, screen, character, click_position):\n Sprite.__init__(self)\n self.color = color\n self.screen = screen\n self.character = character\n self.click_position = click_position\n width, height = 2, 2\n self.image = Surface([width, height])\n self.image.fill(color)\n draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n self.released = False\n self.release_speed = None\n self.release_position = None\n self.angle: Degree = None\n self.t = 0\n self.is_moving_right = False\n self.stopped = False\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n <function token>\n <function token>\n", "<import token>\n\n\nclass Arrow(Sprite):\n <function token>\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n\n def get_line_length(self, adjacent, opposite) ->float:\n return sqrt(adjacent ** 2 + opposite ** 2)\n <function token>\n <function token>\n", "<import token>\n\n\nclass Arrow(Sprite):\n <function token>\n\n def set_center(self):\n if self.released:\n return\n x, y = self.character.get_center()\n self.rect.x = x\n self.rect.y = y\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n <function token>\n <function token>\n <function token>\n", "<import token>\n\n\nclass Arrow(Sprite):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def adjust_angle_to_aim_direction(self, angle: Degree, aiming_down:\n bool, is_moving_right: bool) ->Radian:\n adjustment: Degree = 0\n if is_moving_right:\n if aiming_down:\n adjustment = 180 + 2 * (90 - angle)\n elif aiming_down:\n adjustment = 180\n else:\n adjustment = 2 * (90 - angle)\n return radians(angle + adjustment)\n <function token>\n <function token>\n <function token>\n", "<import token>\n\n\nclass Arrow(Sprite):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n" ]
false
344
97cc29e0d54e5d5e05dff16c92ecc4046363185f
from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from home import views from order import views as OV urlpatterns = [ path('user', include('user.urls')), path('order', include('order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path('product',include('product.urls')), path('',include('home.urls')),# '' - bu home path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls), path('ckeditor', include('ckeditor_uploader.urls')), path('about/', views.about, name='about'), path('contact/', views.contact, name='about'), path('search/', views.search,name='search'), path('search_auto', views.search_auto, name='search_auto'), path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'), path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'), path('lic/',views.lic,name='lic'), path('post/',views.post,name='post'), path('post/<int:id>/',views.post_detail, name='post_detail'), path('lic/<int:id>/',views.lic_detail, name='lic_detail'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[ "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom home import views\nfrom order import views as OV\n\nurlpatterns = [\n path('user', include('user.urls')),\n path('order', include('order.urls')),\n path('shopcart/', OV.shopcart, name='shopcart'),\n path('product',include('product.urls')),\n path('',include('home.urls')),# '' - bu home\n path('faq/', views.faq, name='faq'),\n path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')),\n path('about/', views.about, name='about'),\n path('contact/', views.contact, name='about'),\n path('search/', views.search,name='search'),\n path('search_auto', views.search_auto, name='search_auto'),\n path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'),\n path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'),\n path('lic/',views.lic,name='lic'),\n path('post/',views.post,name='post'),\n path('post/<int:id>/',views.post_detail, name='post_detail'),\n path('lic/<int:id>/',views.lic_detail, name='lic_detail'),\n\n\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom home import views\nfrom order import views as OV\nurlpatterns = [path('user', include('user.urls')), path('order', include(\n 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(\n 'product', include('product.urls')), path('', include('home.urls')),\n path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')), path('about/',\n views.about, name='about'), path('contact/', views.contact, name=\n 'about'), path('search/', views.search, name='search'), path(\n 'search_auto', views.search_auto, name='search_auto'), path(\n 'category/<int:id>/<slug:slug>/', views.category_products, name=\n 'category_products'), path('product/<int:id>/<slug:slug>/', views.\n product_detail, name='product_detail'), path('lic/', views.lic, name=\n 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',\n views.post_detail, name='post_detail'), path('lic/<int:id>/', views.\n lic_detail, name='lic_detail')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "<import token>\nurlpatterns = [path('user', include('user.urls')), path('order', include(\n 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(\n 'product', include('product.urls')), path('', include('home.urls')),\n path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')), path('about/',\n views.about, name='about'), path('contact/', views.contact, name=\n 'about'), path('search/', views.search, name='search'), path(\n 'search_auto', views.search_auto, name='search_auto'), path(\n 'category/<int:id>/<slug:slug>/', views.category_products, name=\n 'category_products'), path('product/<int:id>/<slug:slug>/', views.\n product_detail, name='product_detail'), path('lic/', views.lic, name=\n 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',\n views.post_detail, name='post_detail'), path('lic/<int:id>/', views.\n lic_detail, name='lic_detail')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "<import token>\n<assignment token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
345
35b24ffa14f8b3c2040d5becc8a35721e86d8b3d
total = totmil = cont = menor = 0 barato = ' ' print('-'*40) print('LOJA SUPER BARATÃO') print('-'*40) while True: produto = str(input('Nome do Produto: ')) preco = float(input('Preço: ')) cont += 1 total += preco if preco > 1000: totmil +=1 if cont == 1 or preco < menor: barato = produto menor = preco resp = ' ' while resp not in 'SN': resp = str(input('Quer continuar? [S/N]')).strip().upper()[0] if resp == 'N': break print('O total da compra foi R${:.2f}'.format(total)) print('Temos {} produtos custando mais de R$1000,00'.format(totmil)) print('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))
[ "total = totmil = cont = menor = 0\nbarato = ' '\nprint('-'*40)\nprint('LOJA SUPER BARATÃO')\nprint('-'*40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil +=1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n\n\n", "total = totmil = cont = menor = 0\nbarato = ' '\nprint('-' * 40)\nprint('LOJA SUPER BARATÃO')\nprint('-' * 40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n", "<assignment token>\nprint('-' * 40)\nprint('LOJA SUPER BARATÃO')\nprint('-' * 40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n", "<assignment token>\n<code token>\n" ]
false
346
709271b98fc2b40c763522c54488be36968f02d8
from base import * try: from .prod_local import * except: pass # we currently don't have an interface that allows an administrator # to create a repository for another user. Until we have added this # capability, allow users to create repos. ELEMENTARY_ALLOW_REPO_CREATION = True
[ "from base import *\n\ntry:\n from .prod_local import *\nexcept:\n pass\n\n# we currently don't have an interface that allows an administrator\n# to create a repository for another user. Until we have added this\n# capability, allow users to create repos.\nELEMENTARY_ALLOW_REPO_CREATION = True \n\n\n", "from base import *\ntry:\n from .prod_local import *\nexcept:\n pass\nELEMENTARY_ALLOW_REPO_CREATION = True\n", "<import token>\ntry:\n from .prod_local import *\nexcept:\n pass\nELEMENTARY_ALLOW_REPO_CREATION = True\n", "<import token>\ntry:\n from .prod_local import *\nexcept:\n pass\n<assignment token>\n", "<import token>\n<code token>\n<assignment token>\n" ]
false
347
edcccc673994a8de281a683b747de52d2115f89e
from configparser import ConfigParser from ef.config.components import * from ef.config.efconf import EfConf from ef.config.section import ConfigSection comp_list = [BoundaryConditions, InnerRegion, OutputFile, ParticleInteractionModel, ParticleSource, SpatialMesh, TimeGrid, ExternalFieldUniform] def test_components_to_conf_and_back(): for Component in comp_list: x = Component() y = x.to_conf().make() assert x == y def test_conf_to_configparser_and_back(): confs = [C().to_conf() for C in comp_list] parser = ConfigParser() for c in confs: c.add_section_to_parser(parser) conf2 = ConfigSection.parser_to_confs(parser) assert conf2 == confs def test_minimal_example(): parser = ConfigParser() parser.read("examples/minimal_working_example/minimal_conf.conf") components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)] assert components == [TimeGrid(1e-7, 1e-9, 1e-9), SpatialMesh((5, 5, 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'), BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'), ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_', '.h5')] class TestEfConf: def test_conf_export(self): conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),)) s = conf.export_to_string() c1 = EfConf.from_string(s) assert c1 == conf def test_conf_repr(self): from numpy import array # for use in eval conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),)) s = repr(conf) c1 = eval(s) assert c1 == conf
[ "from configparser import ConfigParser\n\nfrom ef.config.components import *\nfrom ef.config.efconf import EfConf\nfrom ef.config.section import ConfigSection\n\ncomp_list = [BoundaryConditions, InnerRegion, OutputFile, ParticleInteractionModel,\n ParticleSource, SpatialMesh, TimeGrid, ExternalFieldUniform]\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\ndef test_minimal_example():\n parser = ConfigParser()\n parser.read(\"examples/minimal_working_example/minimal_conf.conf\")\n components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)]\n assert components == [TimeGrid(1e-7, 1e-9, 1e-9), SpatialMesh((5, 5, 15), (0.5, 0.5, 1.5)),\n ParticleInteractionModel('noninteracting'), BoundaryConditions(0),\n ExternalFieldUniform('mgn_uni', 'magnetic'),\n ExternalFieldUniform('el_uni', 'electric'),\n OutputFile('example_', '.h5')]\n\n\nclass TestEfConf:\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array # for use in eval\n conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "from configparser import ConfigParser\nfrom ef.config.components import *\nfrom ef.config.efconf import EfConf\nfrom ef.config.section import ConfigSection\ncomp_list = [BoundaryConditions, InnerRegion, OutputFile,\n ParticleInteractionModel, ParticleSource, SpatialMesh, TimeGrid,\n ExternalFieldUniform]\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\ndef test_minimal_example():\n parser = ConfigParser()\n parser.read('examples/minimal_working_example/minimal_conf.conf')\n components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)\n ]\n assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5,\n 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'),\n BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'),\n ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_',\n '.h5')]\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "<import token>\ncomp_list = [BoundaryConditions, InnerRegion, OutputFile,\n ParticleInteractionModel, ParticleSource, SpatialMesh, TimeGrid,\n ExternalFieldUniform]\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\ndef test_minimal_example():\n parser = ConfigParser()\n parser.read('examples/minimal_working_example/minimal_conf.conf')\n components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)\n ]\n assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5,\n 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'),\n BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'),\n ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_',\n '.h5')]\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "<import token>\n<assignment token>\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\ndef test_minimal_example():\n parser = ConfigParser()\n parser.read('examples/minimal_working_example/minimal_conf.conf')\n components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)\n ]\n assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5,\n 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'),\n BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'),\n ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_',\n '.h5')]\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "<import token>\n<assignment token>\n<function token>\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\ndef test_minimal_example():\n parser = ConfigParser()\n parser.read('examples/minimal_working_example/minimal_conf.conf')\n components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)\n ]\n assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5,\n 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'),\n BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'),\n ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_',\n '.h5')]\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "<import token>\n<assignment token>\n<function token>\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\n<function token>\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TestEfConf:\n <function token>\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TestEfConf:\n <function token>\n <function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n" ]
false
348
38e167630519b73bffea4ff527bc7b7272a49f1a
# -*- coding:utf-8 -*- # Author: hankcs # Date: 2019-01-13 15:01 import pickle import numpy as np from bert_serving.client import BertClient from pyhanlp import * CharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable') # bc = BertClient(ip='192.168.1.88') # ip address of the server bc = BertClient(ip='127.0.0.1') # ip address of the GPU machine def embed_last_token(text): result = bc.encode(text, show_tokens=True) # print(result) batch = [] for sent, tensor, tokens in zip(text, result[0], result[1]): valid = [] tid = 0 buffer = '' words = sent.lower().split() for i, t in enumerate(tokens): if t == '[CLS]' or t == '[SEP]': continue else: if t.startswith('##'): t = t[2:] elif t == '[UNK]': t = words[tid][len(buffer)] buffer += t if buffer == words[tid]: valid.append(i) buffer = '' tid += 1 # print(len(valid)) # exit() if len(valid) != len(sent.split()) or tid != len(words): print(valid) print(sent.split()) print(result[1]) batch.append(tensor[valid, :]) return batch def embed_sum(text): result = bc.encode(text, show_tokens=True) # print(result) batch = [] for sent, tensor, tokens in zip(text, result[0], result[1]): token_tensor = [] sent_tensor = [] tid = 0 buffer = '' words = sent.lower().split() for i, t in enumerate(tokens): if t == '[CLS]' or t == '[SEP]': continue else: if t.startswith('##'): t = t[2:] elif t == '[UNK]': t = words[tid][len(buffer)] buffer += t token_tensor.append(tensor[i, :]) if buffer == words[tid]: sent_tensor.append(np.stack(token_tensor).mean(axis=0)) token_tensor = [] buffer = '' tid += 1 # print(len(valid)) # exit() if tid != len(words) or len(sent_tensor) != len(words): print(sent.split()) print(tokens) exit() batch.append(np.stack(sent_tensor)) return batch def generate_bert(path, output, embed_fun=embed_sum): print(output) total = 0 with open(path) as src: batch = [] tensor = [] for line in src: line = line.strip() if len(line) == 0: continue batch.append(CharTable.convert(line).replace('—', '-') .replace('‘', '\'') .replace('…', '.') .replace('坜', '壢') .replace('唛', '麦') .replace('ㄅㄆㄇㄈ', '呀呀') .replace('’', '\'')) if len(batch) and len(batch) % 100 == 0: tensor.extend(embed_fun(batch)) total += len(batch) print(total) batch = [] if len(batch): tensor.extend(embed_fun(batch)) total += len(batch) print(total) with open(output, 'wb') as f: pickle.dump(tensor, f) if __name__ == '__main__': # generate_bert('data/SemEval-2016/news.test.sent.txt', 'data/SemEval-2016/news.test.bert', embed_fun=embed_sum) # generate_bert('data/SemEval-2016/news.valid.sent.txt', 'data/SemEval-2016/news.valid.bert', embed_fun=embed_sum) # generate_bert('data/SemEval-2016/news.train.sent.txt', 'data/SemEval-2016/news.train.bert', embed_fun=embed_sum) # # generate_bert('data/SemEval-2016/text.test.sent.txt', 'data/SemEval-2016/text.test.bert', embed_fun=embed_sum) # generate_bert('data/SemEval-2016/text.valid.sent.txt', 'data/SemEval-2016/text.valid.bert', embed_fun=embed_sum) # generate_bert('data/SemEval-2016/text.train.sent.txt', 'data/SemEval-2016/text.train.bert', embed_fun=embed_sum) generate_bert('data/semeval15/cz.pas.dev.sent.txt', 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum) generate_bert('data/semeval15/cz.pas.train.sent.txt', 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum) generate_bert('data/semeval15/cz.id.pas.sent.txt', 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum) # generate_bert('data/ctb5.1-pos/dev.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.dev.bert', # embed_fun=embed_sum) # generate_bert('data/ctb5.1-pos/test.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.test.bert', # embed_fun=embed_sum) # generate_bert('data/ctb5.1-pos/train.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.train.bert', # embed_fun=embed_sum) # generate_bert('data/msra/dev.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.bert', # embed_fun=embed_sum) # generate_bert('data/msra/test.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.bert', # embed_fun=embed_sum) # generate_bert('data/msra/train.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.bert', # embed_fun=embed_sum) # generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert', # embed_fun=embed_sum) # generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert', # embed_fun=embed_sum) # generate_bert('data/msra/dev.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.auto.bert', # embed_fun=embed_sum) # generate_bert('data/msra/train.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.auto.bert', # embed_fun=embed_sum) # generate_bert('data/ctb5/dev.sent.txt', 'data/embedding/bert_base_sum/ctb.dev.bert', # embed_fun=embed_sum) # generate_bert('data/ctb5/test.sent.txt', 'data/embedding/bert_base_sum/ctb.test.bert', # embed_fun=embed_sum) # generate_bert('data/ctb5/train.sent.txt', 'data/embedding/bert_base_sum/ctb.train.bert', # embed_fun=embed_sum)
[ "# -*- coding:utf-8 -*-\n# Author: hankcs\n# Date: 2019-01-13 15:01\nimport pickle\n\nimport numpy as np\nfrom bert_serving.client import BertClient\nfrom pyhanlp import *\n\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\n\n# bc = BertClient(ip='192.168.1.88') # ip address of the server\nbc = BertClient(ip='127.0.0.1') # ip address of the GPU machine\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n # print(result)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n # print(len(valid))\n # exit()\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n # print(result)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n # print(len(valid))\n # exit()\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-')\n .replace('‘', '\\'')\n .replace('…', '.')\n .replace('坜', '壢')\n .replace('唛', '麦')\n .replace('ㄅㄆㄇㄈ', '呀呀')\n .replace('’', '\\''))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n # generate_bert('data/SemEval-2016/news.test.sent.txt', 'data/SemEval-2016/news.test.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/news.valid.sent.txt', 'data/SemEval-2016/news.valid.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/news.train.sent.txt', 'data/SemEval-2016/news.train.bert', embed_fun=embed_sum)\n #\n # generate_bert('data/SemEval-2016/text.test.sent.txt', 'data/SemEval-2016/text.test.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/text.valid.sent.txt', 'data/SemEval-2016/text.valid.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/text.train.sent.txt', 'data/SemEval-2016/text.train.bert', embed_fun=embed_sum)\n\n generate_bert('data/semeval15/cz.pas.dev.sent.txt', 'data/embedding/bert_base_sum/cz.pas.dev.bert',\n embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt', 'data/embedding/bert_base_sum/cz.pas.train.bert',\n embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt', 'data/embedding/bert_base_sum/cz.id.pas.bert',\n embed_fun=embed_sum)\n\n # generate_bert('data/ctb5.1-pos/dev.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5.1-pos/test.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5.1-pos/train.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.train.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/msra/dev.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/test.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/train.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/dev.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.auto.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/train.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.auto.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/ctb5/dev.sent.txt', 'data/embedding/bert_base_sum/ctb.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5/test.sent.txt', 'data/embedding/bert_base_sum/ctb.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5/train.sent.txt', 'data/embedding/bert_base_sum/ctb.train.bert',\n # embed_fun=embed_sum)\n", "import pickle\nimport numpy as np\nfrom bert_serving.client import BertClient\nfrom pyhanlp import *\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\nbc = BertClient(ip='127.0.0.1')\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n", "<import token>\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\nbc = BertClient(ip='127.0.0.1')\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n", "<import token>\n<assignment token>\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n", "<import token>\n<assignment token>\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\n<code token>\n", "<import token>\n<assignment token>\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\n<function token>\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\n<function token>\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
349
816b1a932208a4525230dd886adf8c67dec3af3e
# content of conftest.py # adapted from http://pytest.org/latest/example/special.html import pytest import requests def tear_down(): ''' conftest.py tear_down - the last to go.... ''' print("\nTEARDOWN after all tests") @pytest.fixture(scope="session", autouse=True) def set_up(request): ''' conftest.py set_up - the first to start.... ''' print("\nSETUP before all tests") request.addfinalizer(tear_down)
[ "# content of conftest.py\n# adapted from http://pytest.org/latest/example/special.html\n\nimport pytest\nimport requests\n\ndef tear_down():\n ''' conftest.py tear_down - the last to go.... '''\n print(\"\\nTEARDOWN after all tests\")\n \n\[email protected](scope=\"session\", autouse=True)\ndef set_up(request):\n ''' conftest.py set_up - the first to start.... '''\n\n print(\"\\nSETUP before all tests\")\n request.addfinalizer(tear_down)\n", "import pytest\nimport requests\n\n\ndef tear_down():\n \"\"\" conftest.py tear_down - the last to go.... \"\"\"\n print('\\nTEARDOWN after all tests')\n\n\[email protected](scope='session', autouse=True)\ndef set_up(request):\n \"\"\" conftest.py set_up - the first to start.... \"\"\"\n print('\\nSETUP before all tests')\n request.addfinalizer(tear_down)\n", "<import token>\n\n\ndef tear_down():\n \"\"\" conftest.py tear_down - the last to go.... \"\"\"\n print('\\nTEARDOWN after all tests')\n\n\[email protected](scope='session', autouse=True)\ndef set_up(request):\n \"\"\" conftest.py set_up - the first to start.... \"\"\"\n print('\\nSETUP before all tests')\n request.addfinalizer(tear_down)\n", "<import token>\n\n\ndef tear_down():\n \"\"\" conftest.py tear_down - the last to go.... \"\"\"\n print('\\nTEARDOWN after all tests')\n\n\n<function token>\n", "<import token>\n<function token>\n<function token>\n" ]
false
350
9096ed4b68d2bef92df7db98589e744ddf3efad0
import matplotlib.pyplot as plt from shapely.geometry import MultiLineString, Polygon mls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]]) p = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)]) results = mls.intersection(p) plt.subplot(1, 2, 1) for ls in mls: plt.plot(*ls.xy) plt.plot(*p.boundary.xy, "-.k") plt.xlim([0, 5]) plt.ylim([0, 2]) plt.subplot(1, 2, 2) for ls in results: plt.plot(*ls.xy) plt.xlim([0, 5]) plt.ylim([0, 2]) plt.show()
[ "import matplotlib.pyplot as plt\nfrom shapely.geometry import MultiLineString, Polygon\n\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\n\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, \"-.k\")\nplt.xlim([0, 5])\nplt.ylim([0, 2])\n\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\n\nplt.show()\n", "import matplotlib.pyplot as plt\nfrom shapely.geometry import MultiLineString, Polygon\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n", "<import token>\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n", "<import token>\n<assignment token>\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
351
c99f1333c5ca3221e9932d9a9ba1d95a77924f0d
import sys import math def get_max_sum(arr): max_sum = -math.inf for i in range(1, 5): for j in range(1, 5): temp = arr[i][j]+arr[i-1][j-1]+arr[i-1][j]+arr[i-1][j+1]+arr[i+1][j+1]+arr[i+1][j]+arr[i+1][j-1] max_sum = max(max_sum, temp) return max_sum def main(): sys_in = sys.stdin sys_out = sys.stdout arr = [] for _ in range(6): temp = list(map(int, sys.stdin.readline().split())) arr.append(temp) print(get_max_sum(arr)) if __name__ == '__main__': main()
[ "import sys\nimport math\n\n\ndef get_max_sum(arr):\n\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j]+arr[i-1][j-1]+arr[i-1][j]+arr[i-1][j+1]+arr[i+1][j+1]+arr[i+1][j]+arr[i+1][j-1]\n max_sum = max(max_sum, temp)\n\n\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n\n arr = []\n\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n\n\n print(get_max_sum(arr))\n\n\n\nif __name__ == '__main__':\n main()", "import sys\nimport math\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\nif __name__ == '__main__':\n main()\n", "<import token>\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\n<code token>\n", "<import token>\n<function token>\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\n<code token>\n", "<import token>\n<function token>\n<function token>\n<code token>\n" ]
false
352
78c9f92349ba834bc64dc84f884638c4316a9ea4
INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root' INPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root' puSTARTUP_TTBAR = '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root' relval = { 'step1': { 'step': 'GEN-HLT', 'timesize': (100, ['MinBias','TTbar']), 'igprof': (50, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], #??? 'pileupInput': '', 'cmsdriver': '--eventcontent RAWSIM --conditions auto:mc' }, 'step2': { 'step': 'RAW2DIGI-RECO', 'timesize': (8000, ['MinBias','TTbar']), 'igprof': (200, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], 'pileupInput': puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS,INPUT_TTBAR], 'cmsdriver': '--eventcontent RECOSIM --conditions auto:startup' }, 'GENSIMDIGI': { 'step': 'GEN-SIM,DIGI', 'timesize': (100, ['MinBias','SingleElectronE1000','SingleMuMinusPt10','SinglePiMinusE1000','TTbar']), 'igprof': (5, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], #??? 'pileupInput': '', 'fileInput': '', 'cmsdriver': '--eventcontent FEVTDEBUG --conditions auto:mc' }, 'HLT': { 'step': 'HLT', 'timesize': (8000, ['MinBias','TTbar']), 'igprof': (500, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], 'pileupInput': puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS,INPUT_TTBAR], 'cmsdriver': '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW' }, 'FASTSIM': { 'step': 'GEN-FASTSIM', 'timesize': (8000, ['MinBias','TTbar']), 'igprof': (500, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], 'cmsdriver': '--eventcontent RECOSIM --conditions auto:mc' } }
[ "INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'\nINPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'\n\npuSTARTUP_TTBAR = '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root'\n\nrelval = {\n 'step1': {\t'step': 'GEN-HLT',\n\t\t\t'timesize': (100, ['MinBias','TTbar']),\n\t\t\t'igprof': (50, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n#???\t\t\t'pileupInput': '',\n\t\t\t'cmsdriver': '--eventcontent RAWSIM --conditions auto:mc' },\n\n\t'step2': {\t'step': 'RAW2DIGI-RECO',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t \t\t'igprof': (200, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'pileupInput': puSTARTUP_TTBAR,\n\t\t\t'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],\n\t\t\t'cmsdriver': '--eventcontent RECOSIM --conditions auto:startup' },\n\n\t'GENSIMDIGI': {\t'step': 'GEN-SIM,DIGI',\n\t\t\t'timesize': (100, ['MinBias','SingleElectronE1000','SingleMuMinusPt10','SinglePiMinusE1000','TTbar']),\n\t\t\t'igprof': (5, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n#???\t\t\t'pileupInput': '',\n\t\t\t'fileInput': '',\n\t\t\t'cmsdriver': '--eventcontent FEVTDEBUG --conditions auto:mc' },\n\n\t'HLT': { 'step': 'HLT',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t\t\t'igprof': (500, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'pileupInput': puSTARTUP_TTBAR,\n\t\t\t'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],\n\t\t\t'cmsdriver': '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW' },\n\n\t'FASTSIM': {\t'step': 'GEN-FASTSIM',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t\t\t'igprof': (500, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'cmsdriver': '--eventcontent RECOSIM --conditions auto:mc' }\n}\n", "INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'\nINPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'\npuSTARTUP_TTBAR = (\n '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root')\nrelval = {'step1': {'step': 'GEN-HLT', 'timesize': (100, ['MinBias',\n 'TTbar']), 'igprof': (50, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'cmsdriver':\n '--eventcontent RAWSIM --conditions auto:mc'}, 'step2': {'step':\n 'RAW2DIGI-RECO', 'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (\n 200, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'],\n 'pileupInput': puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS,\n INPUT_TTBAR], 'cmsdriver':\n '--eventcontent RECOSIM --conditions auto:startup'}, 'GENSIMDIGI': {\n 'step': 'GEN-SIM,DIGI', 'timesize': (100, ['MinBias',\n 'SingleElectronE1000', 'SingleMuMinusPt10', 'SinglePiMinusE1000',\n 'TTbar']), 'igprof': (5, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'fileInput': '', 'cmsdriver':\n '--eventcontent FEVTDEBUG --conditions auto:mc'}, 'HLT': {'step': 'HLT',\n 'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (500, ['TTbar']),\n 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], 'pileupInput':\n puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS, INPUT_TTBAR], 'cmsdriver':\n '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW'\n }, 'FASTSIM': {'step': 'GEN-FASTSIM', 'timesize': (8000, ['MinBias',\n 'TTbar']), 'igprof': (500, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'cmsdriver':\n '--eventcontent RECOSIM --conditions auto:mc'}}\n", "<assignment token>\n" ]
false
353
c6b261a09b2982e17704f847586bbf38d27cb786
from ._sinAction import * from ._sinActionFeedback import * from ._sinActionGoal import * from ._sinActionResult import * from ._sinFeedback import * from ._sinGoal import * from ._sinResult import *
[ "from ._sinAction import *\nfrom ._sinActionFeedback import *\nfrom ._sinActionGoal import *\nfrom ._sinActionResult import *\nfrom ._sinFeedback import *\nfrom ._sinGoal import *\nfrom ._sinResult import *\n", "<import token>\n" ]
false
354
f4306f80330850415b74d729384f360489644e39
import unittest import numpy import pandas as pd import fixtures.examples_validate as examples from cellxgene_schema.validate import Validator from cellxgene_schema.write_labels import AnnDataLabelAppender # Tests for schema compliance of an AnnData object class TestValidAnndata(unittest.TestCase): """ Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases. The valid AnnData object has all valid cases described in the schema. """ def setUp(self): self.validator = Validator() self.validator.adata = examples.adata.copy() def test_valid_anndata(self): self.validator.validate_adata() self.assertFalse(self.validator.errors) class TestH5adValidation(unittest.TestCase): """ Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes below """ def setUp(self): self.h5ad_valid_file = examples.h5ad_valid self.h5ad_invalid_file = examples.h5ad_invalid self.validator = Validator() def test_validate(self): # Valid h5ad self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file)) # Invalid h5ads self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file)) class TestExpressionMatrix(unittest.TestCase): """ Fail cases for expression matrices (anndata.X and anndata.raw.X) """ def setUp(self): self.validator = Validator() self.validator.adata = examples.adata.copy() def test_shapes(self): """ All matrix layers MUST have the same shape, and have the same cell labels and gene labels. """ # Creates a raw layer self.validator.adata.raw = self.validator.adata self.validator.adata.raw.var.drop("feature_is_filtered", axis=1, inplace=True) self.validator.adata.X = examples.adata_non_raw.X.copy() self.validator.adata.uns["X_normalization"] = "CPM" # remove one gene self.validator.adata = self.validator.adata[:, 1:] self.validator.validate_adata() self.assertEqual( self.validator.errors, ["ERROR: Number of genes in X (3) is different than raw.X (4)."], ) def test_sparsity(self): """ In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that the matrix be encoded as a scipy.sparse.csr_matrix """ self.validator.adata.X = self.validator.adata.X.toarray() self.validator.validate_adata() self.assertEqual( self.validator.warnings, [ "WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, " "and it is not a 'scipy.sparse.csr_matrix'. It is " "STRONGLY RECOMMENDED to use this type of matrix for " "the given sparsity." ], ) def test_raw_existence(self): """ Except for ATAC-seq and methylation data, raw data is REQUIRED """ # RNA - raw layer required del self.validator.adata.raw self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'." ], ) # ATAC - raw layer not required # The assignment above makes X to not be raw: self.validator.adata.uns["X_normalization"] = "CPM" # The following line makes it to be scATAC-seq data (EFO:0010891) # Missing raw data in atac-seq data is allowed, thus the following should not return an error message self.validator.errors = [] self.validator.adata.obs["assay_ontology_term_id"] = "EFO:0010891" self.validator.validate_adata() self.assertEqual(self.validator.errors, []) def test_final_strongly_recommended(self): """ Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED """ # move raw to X amd: i.e. there is no final self.validator.adata.X = self.validator.adata.raw.X del self.validator.adata.raw self.validator.adata.uns["X_normalization"] = "none" self.validator.validate_adata() self.assertEqual( self.validator.warnings, [ "WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. " "It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided." ], ) class TestObs(unittest.TestCase): """ Fail cases in adata.uns """ def setUp(self): self.validator = Validator() self.validator.adata = examples.adata.copy() def test_column_presence(self): """ obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe. """ columns = [ "assay_ontology_term_id", "development_stage_ontology_term_id", "disease_ontology_term_id", "ethnicity_ontology_term_id", "is_primary_data", "sex_ontology_term_id", "tissue_ontology_term_id", ] for column in columns: with self.subTest(column=column): self.validator.errors = [] self.validator.adata = examples.adata.copy() self.validator.adata.obs.drop(column, axis=1, inplace=True) # Remove batch condition because it has a dependency with is_primary_data self.validator.adata.uns.pop("batch_condition") self.validator.validate_adata() self.assertEqual( self.validator.errors, [f"ERROR: Dataframe 'obs' is missing " f"column '{column}'."], ) def test_column_presence_organism(self): """ obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe. A separate check is need for organism_ontology_term_id because removing from anndata results in multiple errors given that other columns depend on its presence """ self.validator.adata.obs.drop("organism_ontology_term_id", axis=1, inplace=True) self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: Dataframe 'obs' is missing column " "'organism_ontology_term_id'.", "ERROR: Checking values with dependencies failed for " "adata.obs['ethnicity_ontology_term_id'], this is likely due " "to missing dependent column in adata.obs.", "ERROR: Checking values with dependencies failed for " "adata.obs['development_stage_ontology_term_id'], this is likely due " "to missing dependent column in adata.obs.", ], ) def test_obsolete_term_id(self): """ Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310 for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by EFO:0009899 for 10x 3' v2. https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310 """ # Not a valid term self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0009310" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.", "ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id " "of '[['EFO:0002772', 'EFO:0010183']]'.", ], ) def test_assay_ontology_term_id(self): """ assay_ontology_term_id categorical with str categories. This MUST be an EFO term and either child of "EFO:0002772" or "EFO:0010183" If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to the most accurate term. For example, the sci-plex assay could be curated as "EFO:0010183 (sci-plex)" """ # Not a valid term self.validator.adata.obs["assay_ontology_term_id"][0] = "CL:000001" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid " "ontology term id of 'EFO'.", "ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child " "term id of '[['EFO:0002772', 'EFO:0010183']]'.", ], ) # Not a valid child self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0000001" self.validator.errors = [] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a " "child term id of '[['EFO:0002772', 'EFO:0010183']]'." ], ) # Not a clarifying text self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0010183 sci-plex" self.validator.errors = [] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.", "ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of " "'[['EFO:0002772', 'EFO:0010183']]'.", ], ) def test_cell_type_ontology_term_id(self): """ cell_type_ontology_term_id categorical with str categories. This MUST be a CL term. """ # Not a valid term self.validator.adata.obs["cell_type_ontology_term_id"][0] = "EFO:0000001" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid " "ontology term id of 'CL'." ], ) def test_development_stage_ontology_term_id_human(self): """ development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be "unknown". If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens, this MUST be the most accurate HsapDv term. """ self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:9606" self.validator.adata.obs["development_stage_ontology_term_id"][ 0 ] = "EFO:0000001" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is " "not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' " "(Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown." ], ) def test_development_stage_ontology_term_id_mouse(self): """ If organism_ontolology_term_id is "NCBITaxon:10090" for Mus musculus, this MUST be the most accurate MmusDv term """ self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10090" self.validator.adata.obs["development_stage_ontology_term_id"][ 0 ] = "EFO:0000001" self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is " "not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' " "(Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown." ], ) def test_development_stage_ontology_term_id_all_species(self): """ All other it MUST be children of UBERON:0000105 and not UBERON:0000071 """ # Fail case not an UBERON term self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10114" self.validator.adata.obs["development_stage_ontology_term_id"][ 0 ] = "EFO:0000001" self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is " "not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' " "nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of " "'UBERON:0000105' excluding 'UBERON:0000071', or unknown.", "ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not " "a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' " "nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of " "'UBERON:0000105' excluding 'UBERON:0000071', or unknown.", ], ) # All other it MUST be children of UBERON:0000105 and not UBERON:0000071 # Fail case UBERON:0000071 self.validator.errors = [] self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10114" self.validator.adata.obs["development_stage_ontology_term_id"][ 0 ] = "UBERON:0000071" self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When " "'organism_ontology_term_id' is not 'NCBITaxon:10090' " "nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of " "'UBERON:0000105' excluding 'UBERON:0000071', or unknown.", ], ) def test_disease_ontology_term_id(self): """ disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or PATO:0000461 for normal or healthy. """ # Invalid ontology self.validator.adata.obs["disease_ontology_term_id"][0] = "EFO:0000001" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a " "valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids." ], ) # Invalid PATO term id self.validator.errors = [] self.validator.adata.obs["disease_ontology_term_id"][0] = "PATO:0001894" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. " "Only 'PATO:0000461' is allowed for 'PATO' term ids." ], ) def test_ethnicity_ontology_term_id(self): """ ethnicity_ontology_term_id categorical with str categories. If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens, this MUST be either a HANCESTRO term or "unknown" if unavailable. Otherwise, for all other organisms this MUST be "na". """ # If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens, # this MUST be either a HANCESTRO term or "unknown" if unavailable. self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:9606" self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "EFO:0000001" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is " "not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' " "(Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'." ], ) # Otherwise, for all other organisms this MUST be "na". Below is the test case for mouse data. # development_stage_ontology_term_id has to be set to an appropriate mouse term id, otherwise there # will be an error in that field. self.validator.errors = [] self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10090" self.validator.adata.obs["development_stage_ontology_term_id"][ 0 ] = "MmusDv:0000003" self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "EFO:0000001" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a " "valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' " "(Homo sapiens), ethnicity_ontology_term_id MUST be 'na'." ], ) def test_organism_ontology_term_id(self): """ organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208. """ # Setting "organism_ontology_term_id" to "EFO:0000001" is the fail case. However since this represents neither # human nor mouse, then two other columns that are dependent on it need to be set appropriately to avoid # other error messages: "development_stage_ontology_term_id" and "ethnicity_ontology_term_id" self.validator.adata.obs["organism_ontology_term_id"][0] = "EFO:0000001" self.validator.adata.obs["development_stage_ontology_term_id"][0] = "unknown" self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid " "ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed." ], ) def test_tissue_ontology_term_id_base(self): """ tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue that this cell was derived from, depending on the type of biological sample: """ self.validator.adata.obs["tissue_ontology_term_id"][0] = "EFO:0000001" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a " "valid ontology term id of 'UBERON, CL'." ], ) def test_tissue_ontology_term_id_cell_culture(self): """ Cell Culture - MUST be a CL term appended with " (cell culture)" """ self.validator.adata.obs["tissue_ontology_term_id"][ 0 ] = "CL:0000057 (CELL culture)" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is " "not a valid ontology term id of 'UBERON, CL'." ], ) def test_tissue_ontology_term_id_organoid(self): """ Organoid - MUST be an UBERON term appended with " (organoid)" """ self.validator.adata.obs["tissue_ontology_term_id"][0] = "CL:0000057 (ORGANOID)" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is " "not a valid ontology term id of 'UBERON, CL'." ], ) def test_sex_ontology_term_id(self): """ sex_ontology_term_id categorical with str categories. This MUST be a child of PATOPATO:0001894 for phenotypic sex or "unknown" if unavailable """ self.validator.adata.obs["sex_ontology_term_id"][0] = "EFO:0000001" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is " "not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', " "or 'unknown' are allowed." ], ) def test_is_primary_data(self): """ is_primary_data bool. This MUST be True if this is the canonical instance of this cellular observation and False if not. This is commonly False for meta-analyses reusing data or for secondary views of data. """ self.validator.adata.obs["is_primary_data"] = "FALSE" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: Column 'is_primary_data' in dataframe 'obs' " "must be boolean, not 'object'." ], ) class TestVar(unittest.TestCase): """ Fail cases in adata.var and adata.raw.var """ def setUp(self): self.validator = Validator() self.validator.adata = examples.adata.copy() def test_var_and_raw_var_same_index(self): """ var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index. """ # Swap first row for second one var = Validator.getattr_anndata(self.validator.adata, "var") # First swap the index new_index = list(var.index) tmp = new_index[0] new_index[0] = new_index[1] new_index[1] = tmp var.set_index(pd.Index(new_index), inplace=True) # Then swap the actual rows tmp = var.iloc[0, :].copy() var.iloc[0, :] = var.iloc[1, :].copy() var.iloc[1, :] = tmp self.validator.validate_adata() print("FOO", self.validator.errors) self.assertEqual( self.validator.errors, ["ERROR: Index of 'raw.var' is not identical to index of 'var'."], ) def test_check_unique_var(self): """ var.index MUST contain unique ENSEMBL gene identifiers for features. """ for component_name in ["var", "raw.var"]: with self.subTest(component_name=component_name): # Resetting validator self.validator.adata = examples.adata.copy() self.validator.errors = [] # Duplicate 1st row in var and assign it to 2nd component = Validator.getattr_anndata( self.validator.adata, component_name ) new_index = list(component.index) new_index[1] = new_index[0] component.set_index(pd.Index(new_index), inplace=True) component.iloc[1, :] = component.iloc[0, :] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ f"ERROR: Column 'index' in dataframe '{component_name}' is not unique." ], ) def test_column_presence(self): """ var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe. feature_is_filtered must not be in raw.var, and it's only checked in var """ columns = ["feature_is_filtered", "feature_biotype"] for component_name in ["var", "raw.var"]: for column in columns: if column == "feature_is_filtered" and component_name == "raw.var": continue with self.subTest(component_name=component_name, column=column): # Resetting validator self.validator.errors = [] self.validator.adata = examples.adata.copy() component = Validator.getattr_anndata( self.validator.adata, component_name ) component.drop(column, axis=1, inplace=True) self.validator.validate_adata() self.assertEqual( self.validator.errors, [ f"ERROR: Dataframe '{component_name}' is missing " f"column '{column}'." ], ) def test_feature_is_filtered(self): """ feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X) but is present in the raw matrix (raw.X). The value for all cells of the given feature in the final matrix MUST be 0. Otherwise, this MUST be False. """ # Duplicate 1st row in var and assigned to 2nd self.validator.adata.var["feature_is_filtered"][0] = True for i in range(self.validator.adata.X.shape[0]): self.validator.adata.X[i, 0] = 0 self.validator.adata.X[0, 0] = 1 self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', " "but there are 1 non-zero values in the corresponding columns of the matrix 'X'. " "All values for these features must be 0." ], ) def test_columns_not_in_raw_var(self): """ Curators MUST annotate the following column only in the var dataframe. This column MUST NOT be present in raw.var: feature_is_filtered """ self.validator.adata.raw = self.validator.adata self.validator.adata.uns["X_normalization"] = "CPM" self.validator.validate_adata() self.assertEqual( self.validator.errors, ["ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'."], ) def test_feature_id_wrong_format(self): """ feature_id (var.index) str. If the feature_biotype is "gene" then this MUST be an ENSEMBL term. If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier. This tests the case of an ID with an incorrect format "ENSEBML_NOGENE" """ for component_name in ["var", "raw.var"]: with self.subTest(component_name=component_name): # Resetting validator self.validator.adata = examples.adata.copy() self.validator.errors = [] component = Validator.getattr_anndata( self.validator.adata, component_name ) new_index = list(component.index) new_index[0] = "ENSEBML_NOGENE" component.set_index(pd.Index(new_index), inplace=True) component["feature_biotype"][0] = "gene" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ f"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' " f"in '{component_name}', make sure it is a valid ID." ], ) def test_feature_id_non_existent_ensembl(self): """ feature_id (var.index) str. If the feature_biotype is "gene" then this MUST be an ENSEMBL term. If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier. This tests the case of an ENSEMBL ID that has the right format but doesn't exist """ for component_name in ["var", "raw.var"]: with self.subTest(component_name=component_name): # Resetting validator self.validator.adata = examples.adata.copy() self.validator.errors = [] component = Validator.getattr_anndata( self.validator.adata, component_name ) new_index = list(component.index) new_index[0] = "ENSG000" component.set_index(pd.Index(new_index), inplace=True) component["feature_biotype"][0] = "gene" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ f"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'." ], ) def test_feature_id_non_existent_ercc(self): """ feature_id (var.index) str. If the feature_biotype is "gene" then this MUST be an ENSEMBL term. If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier. This tests the case of an ERCC ID that has the right format but doesn't exist """ for component_name in ["var", "raw.var"]: with self.subTest(component_name=component_name): # Resetting validator self.validator.adata = examples.adata.copy() self.validator.errors = [] component = Validator.getattr_anndata( self.validator.adata, component_name ) new_index = list(component.index) new_index[0] = "ERCC-000000" component.set_index(pd.Index(new_index), inplace=True) component["feature_biotype"][0] = "spike-in" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ f"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'." ], ) class TestUns(unittest.TestCase): """ Fail cases in adata.uns """ def setUp(self): self.validator = Validator() self.validator.adata = examples.adata.copy() def test_required_fields_schema_version(self): """ Curators MUST annotate `schema_version` and values in uns (schema_version) """ del self.validator.adata.uns["schema_version"] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: adata has no schema definition in 'adata.uns'. " "Validation cannot be performed." ], ) def test_required_fields_title(self): """ Curators MUST annotate `schema_version` and values in uns (title) """ del self.validator.adata.uns["title"] self.validator.validate_adata() self.assertEqual( self.validator.errors, ["ERROR: 'title' in 'uns' is not present."] ) def test_required_fields_X_normalization(self): """ Curators MUST annotate `schema_version` and values in uns (X_normalization) """ del self.validator.adata.uns["X_normalization"] self.validator.validate_adata() self.assertEqual( self.validator.errors, ["ERROR: 'X_normalization' in 'uns' is not present."] ) def test_leading_trailing_double_spaces_in_strings(self): """ The following sequences MUST NOT appear in str types documented in the schema: Leading control or space separators - ” This is an example” Trailing control or space separators - “This is an example ” Multiple (internal) control or space separators - "This is an example" """ self.validator.adata.uns["title"] = " There is a leading space" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces." ], ) self.validator.adata.uns["title"] = "There is a trailing space " self.validator.errors = [] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces." ], ) self.validator.adata.uns["title"] = "There are double spaces" self.validator.errors = [] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces." ], ) def test_schema_version(self): """ Schema_version, This MUST be "2.0.0". """ self.validator.adata.uns["schema_version"] = "1.0.0" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. " "Validation cannot be performed." ], ) def test_title(self): """ Title MUST be a string """ # list instead of string self.validator.adata.uns["title"] = ["title"] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: '['title']' in 'uns['title']' is not valid, " "it must be a string." ], ) def test_X_normalization_is_str(self): """ X_normalization str. """ # list instead of string self.validator.adata.uns["X_normalization"] = ["normalization"] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: '['normalization']' in 'uns['X_normalization']' is " "not valid, it must be a string." ], ) def test_X_normalization_not_raw(self): """ X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X. If data in X are raw, this SHOULD be "none". FAIL CASE for when X_normalization was set to "none" but X may not be raw data """ # Assign a real value to X while X_normalization is 'none' del self.validator.adata.raw self.validator.adata.uns["X_normalization"] = "none" self.validator.validate_adata() print("FOO", self.validator.warnings) self.assertEqual( self.validator.warnings, [ "WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear " "to have raw counts (integers)" ], ) def test_batch_condition_is_list(self): """ batch_condition list[str] """ # Check valid case of numpy array which is interchangeable with lists self.validator.adata.uns["batch_condition"] = numpy.array( self.validator.adata.uns["batch_condition"] ) self.validator.validate_adata() self.assertEqual(self.validator.errors, []) # Check fail case: not a list nor numpy array self.validator.adata.uns["batch_condition"] = "cell_type_ontology_term_id" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' " "is not valid, it must be a list or numpy array." ], ) def test_batch_condition_is_column_from_obs(self): """ batch_condition list[str]. str values MUST refer to cell metadata keys in obs. """ self.validator.adata.uns["batch_condition"] = ["NO_COLUMN"] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a " "column in 'adata.obs'." ], ) def test_default_embedding_is_str(self): """ Default_embedding str. """ self.validator.adata.uns["default_embedding"] = ["X_umap"] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, " "it must be a string." ], ) def test_default_embedding_is_key_from_obsm(self): """ Default_embedding str. The value MUST match a key to an embedding in obsm """ self.validator.adata.uns["default_embedding"] = "X_other" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'X_other' in 'uns['default_embedding']' is not valid, " "it must be a key of 'adata.obsm'." ], ) def test_X_approximate_distribution_is_str(self): """ X_approximate_distribution str. The value MUST be "count" [...] or "normal". Note that `normal` is tested in the happy path test case using `good_uns`. """ # Check valid case of "count" which is not included in valid object self.validator.adata.uns["X_approximate_distribution"] = "count" self.validator.validate_adata() self.assertEqual(self.validator.errors, []) # Invalid type: list self.validator.adata.uns["X_approximate_distribution"] = ["count"] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: '['count']' in 'uns['X_approximate_distribution']' " "is not valid, it must be a string." ], ) def test_X_approximate_distribution_is_valid(self): """ X_approximate_distribution str. The value MUST be "count" [...] or "normal" """ self.validator.adata.uns["X_approximate_distribution"] = "COUNT" self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is " "not valid. Allowed terms: ['count', 'normal']." ], ) class TestObsm(unittest.TestCase): """ Fail cases for adata.obsm """ def setUp(self): self.validator = Validator() self.validator.adata = examples.adata.copy() def test_obsm_values_ara_numpy(self): """ values in obsm MUST be a numpy.ndarray """ self.validator.adata.obsm["X_tsne"] = pd.DataFrame( self.validator.adata.obsm["X_umap"], index=self.validator.adata.obs_names ) self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: All embeddings have to be of 'numpy.ndarray' type, " "'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>')." ], ) def test_obsm_values_at_least_one_X(self): """ At least one key for the embedding MUST be prefixed with "X_" """ self.validator.adata.obsm["umap"] = self.validator.adata.obsm["X_umap"] self.validator.adata.uns["default_embedding"] = "umap" del self.validator.adata.obsm["X_umap"] self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: At least one embedding in 'obsm' has to have a " "key with an 'X_' prefix." ], ) def test_obsm_shape(self): """ Curators MUST annotate one or more two-dimensional (m >= 2) embeddings """ # Makes 1 column array self.validator.adata.obsm["X_umap"] = numpy.delete( self.validator.adata.obsm["X_umap"], 0, 1 ) self.validator.validate_adata() self.assertEqual( self.validator.errors, [ "ERROR: All embeddings must have as many rows as cells, and " "at least two columns.'adata.obsm['X_umap']' has shape " "of '(2, 1)'." ], ) class TestAddingLabels(unittest.TestCase): """ Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually created dataframes (positive control) against the ones produced by the validator """ @classmethod def setUpClass(cls): # Manually created data (positive control) cls.adata_with_labels = examples.adata_with_labels # Validate test data validator = Validator() validator.adata = examples.adata.copy() validator.validate_adata() # Add labels through validator cls.label_writer = AnnDataLabelAppender(validator) cls.label_writer._add_labels() def test_var_added_labels(self): """ When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism to the var dataframe. Curators MUST NOT annotate the following columns: - feature_name. If the feature_biotype is "gene" then this MUST be the human-readable ENSEMBL gene name assigned to the feature_id. If the feature_biotype is "spike-in" then this MUST be the ERCC Spike-In identifier appended with " spike-in control". - feature_reference. This MUST be the reference organism for a feature: Homo sapiens "NCBITaxon:9606" Mus musculus "NCBITaxon:10090" SARS-CoV-2 "NCBITaxon:2697049" ERCC Spike-Ins "NCBITaxon:32630" """ for column in ["feature_name", "feature_reference"]: expected_column = self.adata_with_labels.var[column] obtained_column = self.label_writer.adata.var[column] for i, j in zip(expected_column.tolist(), obtained_column.tolist()): with self.subTest(i=i, j=j): self.assertEqual(i, j) def test_obs_added_labels(self): """ When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable name for the corresponding ontology term to the obs dataframe. Curators MUST NOT annotate the following columns. - assay. categorical with str categories. This MUST be the human-readable name assigned to the value of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to assay_ontology_term_id MUST be appended to assay. - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value of cell_type_ontology_term_id. - development_stage. categorical with str categories. This MUST be "unknown" if set in development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to the value of development_stage_ontology_term_id. - disease. categorical with str categories. This MUST be the human-readable name assigned to the value of disease_ontology_term_id. - ethnicity. categorical with str categories. This MUST be "na" or "unknown" if set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable name assigned to the value of ethnicity_ontology_term_id. - organism. categorical with str categories. This MUST be the human-readable name assigned to the value of organism_ontology_term_id. - sex. categorical with str categories. This MUST be "unknown" if set in sex_ontology_term_id; otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id. - tissue. categorical with str categories. This MUST be the human-readable name assigned to the value of tissue_ontology_term_id. " (cell culture)" or " (organoid)" MUST be appended if present in tissue_ontology_term_id. """ for column in [ "assay", "cell_type", "development_stage", "disease", "ethnicity", "organism", "sex", "tissue", ]: expected_column = self.adata_with_labels.obs[column] obtained_column = self.label_writer.adata.obs[column] for i, j in zip(expected_column.tolist(), obtained_column.tolist()): with self.subTest(i=i, j=j): self.assertEqual(i, j)
[ "import unittest\n\nimport numpy\nimport pandas as pd\n\nimport fixtures.examples_validate as examples\nfrom cellxgene_schema.validate import Validator\nfrom cellxgene_schema.write_labels import AnnDataLabelAppender\n\n\n# Tests for schema compliance of an AnnData object\n\n\nclass TestValidAnndata(unittest.TestCase):\n\n \"\"\"\n Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases.\n\n The valid AnnData object has all valid cases described in the schema.\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_valid_anndata(self):\n self.validator.validate_adata()\n self.assertFalse(self.validator.errors)\n\n\nclass TestH5adValidation(unittest.TestCase):\n\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n\n # Valid h5ad\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n\n # Invalid h5ads\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n\n # Creates a raw layer\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop(\"feature_is_filtered\", axis=1, inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n\n # remove one gene\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Number of genes in X (3) is different than raw.X (4).\"],\n )\n\n def test_sparsity(self):\n\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )\n\n def test_raw_existence(self):\n\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n\n # RNA - raw layer required\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ],\n )\n\n # ATAC - raw layer not required\n # The assignment above makes X to not be raw: self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n # The following line makes it to be scATAC-seq data (EFO:0010891)\n # Missing raw data in atac-seq data is allowed, thus the following should not return an error message\n self.validator.errors = []\n self.validator.adata.obs[\"assay_ontology_term_id\"] = \"EFO:0010891\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n\n # move raw to X amd: i.e. there is no final\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. \"\n \"It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ],\n )\n\n\nclass TestObs(unittest.TestCase):\n\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n\n columns = [\n \"assay_ontology_term_id\",\n \"development_stage_ontology_term_id\",\n \"disease_ontology_term_id\",\n \"ethnicity_ontology_term_id\",\n \"is_primary_data\",\n \"sex_ontology_term_id\",\n \"tissue_ontology_term_id\",\n ]\n\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n # Remove batch condition because it has a dependency with is_primary_data\n self.validator.adata.uns.pop(\"batch_condition\")\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [f\"ERROR: Dataframe 'obs' is missing \" f\"column '{column}'.\"],\n )\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n\n self.validator.adata.obs.drop(\"organism_ontology_term_id\", axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Dataframe 'obs' is missing column \"\n \"'organism_ontology_term_id'.\",\n \"ERROR: Checking values with dependencies failed for \"\n \"adata.obs['ethnicity_ontology_term_id'], this is likely due \"\n \"to missing dependent column in adata.obs.\",\n \"ERROR: Checking values with dependencies failed for \"\n \"adata.obs['development_stage_ontology_term_id'], this is likely due \"\n \"to missing dependent column in adata.obs.\",\n ],\n )\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0009310\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\",\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id \"\n \"of '[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n def test_assay_ontology_term_id(self):\n\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"CL:000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid \"\n \"ontology term id of 'EFO'.\",\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child \"\n \"term id of '[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n # Not a valid child\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a \"\n \"child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ],\n )\n\n # Not a clarifying text\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0010183 sci-plex\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\",\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of \"\n \"'[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n def test_cell_type_ontology_term_id(self):\n\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"cell_type_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid \"\n \"ontology term id of 'CL'.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_human(self):\n\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:9606\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' \"\n \"(Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10090\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' \"\n \"(Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_all_species(self):\n\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n\n # Fail case not an UBERON term\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10114\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not \"\n \"a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n ],\n )\n\n # All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n # Fail case UBERON:0000071\n self.validator.errors = []\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10114\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"UBERON:0000071\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When \"\n \"'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n ],\n )\n\n def test_disease_ontology_term_id(self):\n\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n\n # Invalid ontology\n self.validator.adata.obs[\"disease_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a \"\n \"valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ],\n )\n\n # Invalid PATO term id\n self.validator.errors = []\n self.validator.adata.obs[\"disease_ontology_term_id\"][0] = \"PATO:0001894\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. \"\n \"Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ],\n )\n\n def test_ethnicity_ontology_term_id(self):\n\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n\n # If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n # this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:9606\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is \"\n \"not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' \"\n \"(Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ],\n )\n\n # Otherwise, for all other organisms this MUST be \"na\". Below is the test case for mouse data.\n # development_stage_ontology_term_id has to be set to an appropriate mouse term id, otherwise there\n # will be an error in that field.\n self.validator.errors = []\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10090\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"MmusDv:0000003\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a \"\n \"valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' \"\n \"(Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ],\n )\n\n def test_organism_ontology_term_id(self):\n\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n\n # Setting \"organism_ontology_term_id\" to \"EFO:0000001\" is the fail case. However since this represents neither\n # human nor mouse, then two other columns that are dependent on it need to be set appropriately to avoid\n # other error messages: \"development_stage_ontology_term_id\" and \"ethnicity_ontology_term_id\"\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][0] = \"unknown\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid \"\n \"ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ],\n )\n\n def test_tissue_ontology_term_id_base(self):\n\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a \"\n \"valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_tissue_ontology_term_id_cell_culture(self):\n\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][\n 0\n ] = \"CL:0000057 (CELL culture)\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_tissue_ontology_term_id_organoid(self):\n\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][0] = \"CL:0000057 (ORGANOID)\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_sex_ontology_term_id(self):\n\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n\n self.validator.adata.obs[\"sex_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is \"\n \"not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', \"\n \"or 'unknown' are allowed.\"\n ],\n )\n\n def test_is_primary_data(self):\n\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n\n self.validator.adata.obs[\"is_primary_data\"] = \"FALSE\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' \"\n \"must be boolean, not 'object'.\"\n ],\n )\n\n\nclass TestVar(unittest.TestCase):\n\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n\n # Swap first row for second one\n var = Validator.getattr_anndata(self.validator.adata, \"var\")\n\n # First swap the index\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n\n # Then swap the actual rows\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n\n self.validator.validate_adata()\n print(\"FOO\", self.validator.errors)\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"],\n )\n\n def test_check_unique_var(self):\n\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n # Duplicate 1st row in var and assign it to 2nd\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ],\n )\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )\n\n def test_feature_is_filtered(self):\n\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. \"\n \"All values for these features must be 0.\"\n ],\n )\n\n def test_columns_not_in_raw_var(self):\n\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"],\n )\n\n def test_feature_id_wrong_format(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ENSEBML_NOGENE\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"gene\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' \"\n f\"in '{component_name}', make sure it is a valid ID.\"\n ],\n )\n\n def test_feature_id_non_existent_ensembl(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ENSG000\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"gene\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ],\n )\n\n def test_feature_id_non_existent_ercc(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ERCC-000000\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"spike-in\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ],\n )\n\n\nclass TestUns(unittest.TestCase):\n\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n\n del self.validator.adata.uns[\"schema_version\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: adata has no schema definition in 'adata.uns'. \"\n \"Validation cannot be performed.\"\n ],\n )\n\n def test_required_fields_title(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n\n del self.validator.adata.uns[\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'title' in 'uns' is not present.\"]\n )\n\n def test_required_fields_X_normalization(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n\n del self.validator.adata.uns[\"X_normalization\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'X_normalization' in 'uns' is not present.\"]\n )\n\n def test_leading_trailing_double_spaces_in_strings(self):\n\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n\n self.validator.adata.uns[\"title\"] = \" There is a leading space\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ],\n )\n\n self.validator.adata.uns[\"title\"] = \"There is a trailing space \"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ],\n )\n\n self.validator.adata.uns[\"title\"] = \"There are double spaces\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ],\n )\n\n def test_schema_version(self):\n\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. \"\n \"Validation cannot be performed.\"\n ],\n )\n\n def test_title(self):\n\n \"\"\"\n Title MUST be a string\n \"\"\"\n\n # list instead of string\n self.validator.adata.uns[\"title\"] = [\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['title']' in 'uns['title']' is not valid, \"\n \"it must be a string.\"\n ],\n )\n\n def test_X_normalization_is_str(self):\n\n \"\"\"\n X_normalization str.\n \"\"\"\n\n # list instead of string\n self.validator.adata.uns[\"X_normalization\"] = [\"normalization\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is \"\n \"not valid, it must be a string.\"\n ],\n )\n\n def test_X_normalization_not_raw(self):\n\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n\n # Assign a real value to X while X_normalization is 'none'\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n print(\"FOO\", self.validator.warnings)\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear \"\n \"to have raw counts (integers)\"\n ],\n )\n\n def test_batch_condition_is_list(self):\n\n \"\"\"\n batch_condition list[str]\n \"\"\"\n\n # Check valid case of numpy array which is interchangeable with lists\n self.validator.adata.uns[\"batch_condition\"] = numpy.array(\n self.validator.adata.uns[\"batch_condition\"]\n )\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Check fail case: not a list nor numpy array\n self.validator.adata.uns[\"batch_condition\"] = \"cell_type_ontology_term_id\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' \"\n \"is not valid, it must be a list or numpy array.\"\n ],\n )\n\n def test_batch_condition_is_column_from_obs(self):\n\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n\n self.validator.adata.uns[\"batch_condition\"] = [\"NO_COLUMN\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a \"\n \"column in 'adata.obs'.\"\n ],\n )\n\n def test_default_embedding_is_str(self):\n\n \"\"\"\n Default_embedding str.\n \"\"\"\n\n self.validator.adata.uns[\"default_embedding\"] = [\"X_umap\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, \"\n \"it must be a string.\"\n ],\n )\n\n def test_default_embedding_is_key_from_obsm(self):\n\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n\n self.validator.adata.uns[\"default_embedding\"] = \"X_other\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, \"\n \"it must be a key of 'adata.obsm'.\"\n ],\n )\n\n def test_X_approximate_distribution_is_str(self):\n\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n\n # Check valid case of \"count\" which is not included in valid object\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"count\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Invalid type: list\n self.validator.adata.uns[\"X_approximate_distribution\"] = [\"count\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' \"\n \"is not valid, it must be a string.\"\n ],\n )\n\n def test_X_approximate_distribution_is_valid(self):\n\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"COUNT\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is \"\n \"not valid. Allowed terms: ['count', 'normal'].\"\n ],\n )\n\n\nclass TestObsm(unittest.TestCase):\n\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n\n self.validator.adata.obsm[\"X_tsne\"] = pd.DataFrame(\n self.validator.adata.obsm[\"X_umap\"], index=self.validator.adata.obs_names\n )\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, \"\n \"'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ],\n )\n\n def test_obsm_values_at_least_one_X(self):\n\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n\n self.validator.adata.obsm[\"umap\"] = self.validator.adata.obsm[\"X_umap\"]\n self.validator.adata.uns[\"default_embedding\"] = \"umap\"\n del self.validator.adata.obsm[\"X_umap\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: At least one embedding in 'obsm' has to have a \"\n \"key with an 'X_' prefix.\"\n ],\n )\n\n def test_obsm_shape(self):\n\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n\n # Makes 1 column array\n self.validator.adata.obsm[\"X_umap\"] = numpy.delete(\n self.validator.adata.obsm[\"X_umap\"], 0, 1\n )\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: All embeddings must have as many rows as cells, and \"\n \"at least two columns.'adata.obsm['X_umap']' has shape \"\n \"of '(2, 1)'.\"\n ],\n )\n\n\nclass TestAddingLabels(unittest.TestCase):\n\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n\n # Manually created data (positive control)\n cls.adata_with_labels = examples.adata_with_labels\n\n # Validate test data\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n\n # Add labels through validator\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n\n for column in [\"feature_name\", \"feature_reference\"]:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n\n for column in [\n \"assay\",\n \"cell_type\",\n \"development_stage\",\n \"disease\",\n \"ethnicity\",\n \"organism\",\n \"sex\",\n \"tissue\",\n ]:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "import unittest\nimport numpy\nimport pandas as pd\nimport fixtures.examples_validate as examples\nfrom cellxgene_schema.validate import Validator\nfrom cellxgene_schema.write_labels import AnnDataLabelAppender\n\n\nclass TestValidAnndata(unittest.TestCase):\n \"\"\"\n Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases.\n\n The valid AnnData object has all valid cases described in the schema.\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_valid_anndata(self):\n self.validator.validate_adata()\n self.assertFalse(self.validator.errors)\n\n\nclass TestH5adValidation(unittest.TestCase):\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n\n\nclass TestValidAnndata(unittest.TestCase):\n \"\"\"\n Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases.\n\n The valid AnnData object has all valid cases described in the schema.\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_valid_anndata(self):\n self.validator.validate_adata()\n self.assertFalse(self.validator.errors)\n\n\nclass TestH5adValidation(unittest.TestCase):\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n\n\nclass TestValidAnndata(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_valid_anndata(self):\n self.validator.validate_adata()\n self.assertFalse(self.validator.errors)\n\n\nclass TestH5adValidation(unittest.TestCase):\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n\n\nclass TestValidAnndata(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_valid_anndata(self):\n self.validator.validate_adata()\n self.assertFalse(self.validator.errors)\n\n\nclass TestH5adValidation(unittest.TestCase):\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n\n\nclass TestValidAnndata(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n\nclass TestH5adValidation(unittest.TestCase):\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n\n\nclass TestH5adValidation(unittest.TestCase):\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n\n\nclass TestH5adValidation(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n\n\nclass TestH5adValidation(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_validate(self):\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n\n\nclass TestH5adValidation(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ])\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n\n def test_sparsity(self):\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(self.validator.warnings, [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, and it is not a 'scipy.sparse.csr_matrix'. It is STRONGLY RECOMMENDED to use this type of matrix for the given sparsity.\"\n ])\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n <function token>\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n <function token>\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n <function token>\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_shapes(self):\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop('feature_is_filtered', axis=1,\n inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n 'ERROR: Number of genes in X (3) is different than raw.X (4).'])\n <function token>\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n <function token>\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def test_raw_existence(self):\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['assay_ontology_term_id'] = 'EFO:0010891'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n <function token>\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n <function token>\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n <function token>\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n <function token>\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n <function token>\n <function token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <function token>\n <function token>\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <function token>\n <function token>\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <function token>\n <function token>\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <function token>\n <function token>\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObs(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n <function token>\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n <function token>\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n <function token>\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n <function token>\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n <function token>\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n <function token>\n <function token>\n <function token>\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n <function token>\n <function token>\n <function token>\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestVar(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n <function token>\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n <function token>\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n <function token>\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n <function token>\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n <function token>\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n <function token>\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n <function token>\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n <function token>\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n <function token>\n <function token>\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n <function token>\n <function token>\n <function token>\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n <function token>\n <function token>\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n <function token>\n <function token>\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n <function token>\n <function token>\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestUns(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObsm(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObsm(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <function token>\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObsm(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObsm(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestObsm(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestAddingLabels(unittest.TestCase):\n <docstring token>\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestAddingLabels(unittest.TestCase):\n <docstring token>\n <function token>\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestAddingLabels(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TestAddingLabels(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n" ]
false
355
ad3a7221883a847fc9d26097c3801973cbbda38e
from django.urls import path,include from Income import views urlpatterns = [ path('IncomeHome/',views.IncomeHome,name='IncomeHome'), path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'), path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'), path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'), path('Income/',views.IncomeView.as_view(),name='Income'), ]
[ "\nfrom django.urls import path,include\n\nfrom Income import views\n\nurlpatterns = [\n path('IncomeHome/',views.IncomeHome,name='IncomeHome'),\n path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'),\n path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'),\n path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'),\n path('Income/',views.IncomeView.as_view(),name='Income'),\n\n]\n", "from django.urls import path, include\nfrom Income import views\nurlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),\n path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'\n ), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=\n 'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.\n as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.\n as_view(), name='Income')]\n", "<import token>\nurlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),\n path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'\n ), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=\n 'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.\n as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.\n as_view(), name='Income')]\n", "<import token>\n<assignment token>\n" ]
false
356
9e7dee9c0fd4cd290f4710649ffc4a94fedf0358
import os pil = 'y' while(pil=='y'): os.system("cls") print("===============================") print("== KALKULATOR SEDERHANA ==") print("===============================") print("MENU-UTAMA : ") print("1 Penjumlahan") print("2 Pengurangan") print("3 Perkalian") print("4 Pembagian") def penjumlahan (): print("PENJUMLAHAN DUA BUAH BILANGAN") print("=============================") x = float(input ("Bilangan pertama: ")) y = float(input ("Bilangan kedua : ")) print("-----------------------------") print "Jumlah = ", x+y def pengurangan (): print("PENGURANGAN DUA BUAH BILANGAN") print("=============================") x = float(input("Bilangan pertama: ")) y = float(input("Bilangan kedua : ")) print("-----------------------------") print "Jumlah = ", x-y def perkalian (): print("PERKALIAN DUA BUAH BILANGAN") print("===========================") x = float(input("Bilangan pertama: ")) y = float(input("Bilangan kedua : ")) print("---------------------------") print "Jumlah = ", x*y def pembagian (): print("PEMBAGIAN DUA BUAH BILANGAN") print("===========================") x = float(input("Bilangan pertama: ")) y = float(input("Bilangan kedua : ")) print("---------------------------") print "Jumlah = ", x/y pilihan = int(input("Masukkan pilihan Anda(1,2,3, dan 4): ")) if (pilihan==1): penjumlahan () elif (pilihan==2): pengurangan () elif (pilihan==3): perkalian () elif (pilihan==4): pembagian () else: print("Pilihan Anda salah") pil = raw_input("ulang KALKULATOR lagi? (y): ")
[ "import os\npil = 'y'\nwhile(pil=='y'):\n os.system(\"cls\")\n print(\"===============================\")\n print(\"== KALKULATOR SEDERHANA ==\")\n print(\"===============================\")\n print(\"MENU-UTAMA : \")\n print(\"1 Penjumlahan\")\n print(\"2 Pengurangan\")\n print(\"3 Perkalian\")\n print(\"4 Pembagian\")\n\n def penjumlahan ():\n print(\"PENJUMLAHAN DUA BUAH BILANGAN\")\n print(\"=============================\")\n x = float(input (\"Bilangan pertama: \"))\n y = float(input (\"Bilangan kedua : \"))\n print(\"-----------------------------\")\n print \"Jumlah = \", x+y\n def pengurangan ():\n print(\"PENGURANGAN DUA BUAH BILANGAN\")\n print(\"=============================\")\n x = float(input(\"Bilangan pertama: \"))\n y = float(input(\"Bilangan kedua : \"))\n print(\"-----------------------------\")\n print \"Jumlah = \", x-y\n def perkalian ():\n print(\"PERKALIAN DUA BUAH BILANGAN\")\n print(\"===========================\")\n x = float(input(\"Bilangan pertama: \"))\n y = float(input(\"Bilangan kedua : \"))\n print(\"---------------------------\")\n print \"Jumlah = \", x*y\n def pembagian ():\n print(\"PEMBAGIAN DUA BUAH BILANGAN\")\n print(\"===========================\")\n x = float(input(\"Bilangan pertama: \"))\n y = float(input(\"Bilangan kedua : \"))\n print(\"---------------------------\")\n print \"Jumlah = \", x/y\n pilihan = int(input(\"Masukkan pilihan Anda(1,2,3, dan 4): \"))\n if (pilihan==1):\n penjumlahan ()\n elif (pilihan==2):\n pengurangan ()\n elif (pilihan==3):\n perkalian ()\n elif (pilihan==4):\n pembagian ()\n else:\n print(\"Pilihan Anda salah\")\n pil = raw_input(\"ulang KALKULATOR lagi? (y): \")\n" ]
true
357
180f7f0ade9770c6669680bd13ac8f2fd55cc8c7
def raizCubica(numero): r = pow(numero,(1/3)) return r numeros = [] raices = [] for x in range(5): numeros.insert(x, float(input("Ingrese Numero: "))) raices.insert(x, round(raizCubica(numeros[x]),3)) print("Numeros: ", numeros) print("Raices: ", raices)
[ "def raizCubica(numero):\n r = pow(numero,(1/3))\n return r\n\nnumeros = []\nraices = []\n\nfor x in range(5):\n numeros.insert(x, float(input(\"Ingrese Numero: \")))\n raices.insert(x, round(raizCubica(numeros[x]),3))\n\nprint(\"Numeros: \", numeros)\nprint(\"Raices: \", raices)", "def raizCubica(numero):\n r = pow(numero, 1 / 3)\n return r\n\n\nnumeros = []\nraices = []\nfor x in range(5):\n numeros.insert(x, float(input('Ingrese Numero: ')))\n raices.insert(x, round(raizCubica(numeros[x]), 3))\nprint('Numeros: ', numeros)\nprint('Raices: ', raices)\n", "def raizCubica(numero):\n r = pow(numero, 1 / 3)\n return r\n\n\n<assignment token>\nfor x in range(5):\n numeros.insert(x, float(input('Ingrese Numero: ')))\n raices.insert(x, round(raizCubica(numeros[x]), 3))\nprint('Numeros: ', numeros)\nprint('Raices: ', raices)\n", "def raizCubica(numero):\n r = pow(numero, 1 / 3)\n return r\n\n\n<assignment token>\n<code token>\n", "<function token>\n<assignment token>\n<code token>\n" ]
false
358
97ebdeada3d797a971b5c3851b75f9754595f67c
""" Python package setup file. """ from setuptools import setup setup( name="TF_Speech", version="0.2.0", extras_require={'tensorflow': ['tensorflow'], 'tensorflow with gpu': ['tensorflow-gpu']}, )
[ "\"\"\"\nPython package setup file.\n\"\"\"\n\nfrom setuptools import setup\n\nsetup(\n name=\"TF_Speech\",\n version=\"0.2.0\",\n extras_require={'tensorflow': ['tensorflow'],\n 'tensorflow with gpu': ['tensorflow-gpu']},\n)\n", "<docstring token>\nfrom setuptools import setup\nsetup(name='TF_Speech', version='0.2.0', extras_require={'tensorflow': [\n 'tensorflow'], 'tensorflow with gpu': ['tensorflow-gpu']})\n", "<docstring token>\n<import token>\nsetup(name='TF_Speech', version='0.2.0', extras_require={'tensorflow': [\n 'tensorflow'], 'tensorflow with gpu': ['tensorflow-gpu']})\n", "<docstring token>\n<import token>\n<code token>\n" ]
false
359
9f34bf3a0bb24db428b7af1a354aec1d3a72df98
from random import randrange from django.core.exceptions import ValidationError from django.contrib.auth import get_user_model from rest_framework import serializers from rest_framework_simplejwt.serializers import TokenObtainPairSerializer from .models import EmailValidation from ..emails.models import Email from ..users.serializers import FullUserSerializer User = get_user_model() def user_with_email_not_existing(email): try: User.objects.get(email=email) raise ValidationError(message='This email is taken') except User.DoesNotExist: return email def email_does_exist(email): try: User.objects.get(email=email) return email except User.DoesNotExist: raise ValidationError(message='User does not exist!') class CreatePasswordEmailValidationSerializer(serializers.Serializer): email = serializers.EmailField(validators=[email_does_exist]) def save(self): validation_code = randrange(10000000, 100000000) email = Email.objects.create( validation_code=validation_code, to=self.validated_data.get('email'), type=self.validated_data.get('type') ) new_validation = EmailValidation.objects.create( validation_code=validation_code, email=email, type=self.validated_data.get('type')) return new_validation class CreateEmailValidationSerializer(serializers.Serializer): email = serializers.EmailField(validators=[user_with_email_not_existing]) def save(self): validation_code = randrange(10000000, 100000000) email = Email.objects.create( validation_code=validation_code, to=self.validated_data.get('email'), type=self.validated_data.get('type') ) new_validation = EmailValidation.objects.create( validation_code=validation_code, email=email, type=self.validated_data.get('type')) return new_validation class EmailSerializer(serializers.ModelSerializer): email = serializers.EmailField() class Meta: model = EmailValidation fields = ['email'] class EmailValidationSerializer(serializers.ModelSerializer): email = serializers.EmailField() class Meta: model = EmailValidation fields = ['email', 'validation_code'] class EmailValidationPasswordSerializer(serializers.ModelSerializer): email = serializers.EmailField() password = serializers.CharField(max_length=200) class Meta: model = EmailValidation fields = ['email', 'validation_code', 'password'] class NewUserSerializer(serializers.ModelSerializer): email = serializers.EmailField() class Meta: model = EmailValidation fields = ['email'] class TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer): def validate(self, attrs): data = super().validate(attrs) refresh = self.get_token(self.user) data['refresh'] = str(refresh) data['access'] = str(refresh.access_token) data['user'] = FullUserSerializer(self.user).data return data
[ "from random import randrange\r\n\r\nfrom django.core.exceptions import ValidationError\r\nfrom django.contrib.auth import get_user_model\r\n\r\nfrom rest_framework import serializers\r\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\r\n\r\nfrom .models import EmailValidation\r\nfrom ..emails.models import Email\r\nfrom ..users.serializers import FullUserSerializer\r\n\r\nUser = get_user_model()\r\n\r\n\r\ndef user_with_email_not_existing(email):\r\n try:\r\n User.objects.get(email=email)\r\n raise ValidationError(message='This email is taken')\r\n except User.DoesNotExist:\r\n return email\r\n\r\n\r\ndef email_does_exist(email):\r\n try:\r\n User.objects.get(email=email)\r\n return email\r\n except User.DoesNotExist:\r\n raise ValidationError(message='User does not exist!')\r\n\r\n\r\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\r\n email = serializers.EmailField(validators=[email_does_exist])\r\n\r\n def save(self):\r\n validation_code = randrange(10000000, 100000000)\r\n email = Email.objects.create(\r\n validation_code=validation_code,\r\n to=self.validated_data.get('email'),\r\n type=self.validated_data.get('type')\r\n )\r\n new_validation = EmailValidation.objects.create(\r\n validation_code=validation_code,\r\n email=email,\r\n type=self.validated_data.get('type'))\r\n return new_validation\r\n\r\n\r\nclass CreateEmailValidationSerializer(serializers.Serializer):\r\n email = serializers.EmailField(validators=[user_with_email_not_existing])\r\n\r\n def save(self):\r\n validation_code = randrange(10000000, 100000000)\r\n email = Email.objects.create(\r\n validation_code=validation_code,\r\n to=self.validated_data.get('email'),\r\n type=self.validated_data.get('type')\r\n )\r\n new_validation = EmailValidation.objects.create(\r\n validation_code=validation_code,\r\n email=email,\r\n type=self.validated_data.get('type'))\r\n return new_validation\r\n\r\n\r\nclass EmailSerializer(serializers.ModelSerializer):\r\n email = serializers.EmailField()\r\n\r\n class Meta:\r\n model = EmailValidation\r\n fields = ['email']\r\n\r\n\r\nclass EmailValidationSerializer(serializers.ModelSerializer):\r\n email = serializers.EmailField()\r\n\r\n class Meta:\r\n model = EmailValidation\r\n fields = ['email', 'validation_code']\r\n\r\n\r\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\r\n email = serializers.EmailField()\r\n password = serializers.CharField(max_length=200)\r\n\r\n class Meta:\r\n model = EmailValidation\r\n fields = ['email', 'validation_code', 'password']\r\n\r\n\r\nclass NewUserSerializer(serializers.ModelSerializer):\r\n email = serializers.EmailField()\r\n\r\n class Meta:\r\n model = EmailValidation\r\n fields = ['email']\r\n\r\n\r\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\r\n def validate(self, attrs):\r\n data = super().validate(attrs)\r\n\r\n refresh = self.get_token(self.user)\r\n\r\n data['refresh'] = str(refresh)\r\n data['access'] = str(refresh.access_token)\r\n\r\n data['user'] = FullUserSerializer(self.user).data\r\n return data\r\n", "from random import randrange\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom .models import EmailValidation\nfrom ..emails.models import Email\nfrom ..users.serializers import FullUserSerializer\nUser = get_user_model()\n\n\ndef user_with_email_not_existing(email):\n try:\n User.objects.get(email=email)\n raise ValidationError(message='This email is taken')\n except User.DoesNotExist:\n return email\n\n\ndef email_does_exist(email):\n try:\n User.objects.get(email=email)\n return email\n except User.DoesNotExist:\n raise ValidationError(message='User does not exist!')\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[email_does_exist])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\nUser = get_user_model()\n\n\ndef user_with_email_not_existing(email):\n try:\n User.objects.get(email=email)\n raise ValidationError(message='This email is taken')\n except User.DoesNotExist:\n return email\n\n\ndef email_does_exist(email):\n try:\n User.objects.get(email=email)\n return email\n except User.DoesNotExist:\n raise ValidationError(message='User does not exist!')\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[email_does_exist])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n\n\ndef user_with_email_not_existing(email):\n try:\n User.objects.get(email=email)\n raise ValidationError(message='This email is taken')\n except User.DoesNotExist:\n return email\n\n\ndef email_does_exist(email):\n try:\n User.objects.get(email=email)\n return email\n except User.DoesNotExist:\n raise ValidationError(message='User does not exist!')\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[email_does_exist])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n\n\ndef user_with_email_not_existing(email):\n try:\n User.objects.get(email=email)\n raise ValidationError(message='This email is taken')\n except User.DoesNotExist:\n return email\n\n\n<function token>\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[email_does_exist])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[email_does_exist])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n <assignment token>\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n <assignment token>\n <function token>\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n <assignment token>\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n <assignment token>\n <function token>\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n <assignment token>\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n <assignment token>\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n <assignment token>\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n <function token>\n", "<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n" ]
false
360
2ca91c410b8c8d6306d5ed918783a4d77a091ba8
from typing import List from re import match from utility import ButtonGroup import rumps class RepeatWorkBreak(rumps.App): def __init__(self): rumps.debug_mode(True) self.config = { "app_title": "Repeat Work and Break", "start": "Start", "pause": "Pause Timer", "continue": "Continue Timer", "stop": "Stop Timer", "timeout_message": "Time is up! Take a break :)", "shift_time_in_seconds": 60 * 60 * 1, # 60 seconds * 60 = 1 hour "break_time_in_seconds": 60 * 5, 'shift_setting_buttons': [ { 'title': '1 hour', }, { 'title': '4 hour', }, { 'title': '8 hour', } ], 'break_setting_buttons': [ { 'title': '5 minutes', }, { 'title': '10 minutes', }, { 'title': '15 minutes', } ], } self.app = rumps.App(self.config['app_title']) self.timer = rumps.Timer(self.on_tick, 1) self.shift_setting_button_group = ButtonGroup( self.config['shift_setting_buttons'], callback=self.handle_shift_setting_button) self.break_setting_button_group = ButtonGroup( self.config['break_setting_buttons'], callback=self.handle_shift_setting_button) self.shift_time_in_seconds = self.config["shift_time_in_seconds"] self.break_time_in_seconds = self.config["break_time_in_seconds"] self.elapsed_shift_time_in_hours = 0 self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600) self.start_pause_button = rumps.MenuItem( title=self.config["start"], callback=self.start_timer) self.stop_button = rumps.MenuItem( title=self.config["stop"], callback=None) self.app.menu = [ { 'Preferences': { "Setting Shift": self.shift_setting_button_group.buttons, "Setting Break / hr": self.break_setting_button_group.buttons, } }, None, self.start_pause_button, self.stop_button, ] def set_up_menu(self): self.timer.stop() self.timer.count = 0 self.app.title = self.config['app_title'] def convert_seconds_to_time_string(self, seconds) -> str: seconds = seconds % (24 * 3600) hours, seconds = divmod(seconds, 3600) minutes, seconds = divmod(seconds, 60) return "%d:%02d:%02d" % (hours, minutes, seconds) def on_tick(self, sender): time_left_in_seconds = sender.end - sender.count time_left_in_string = self.convert_seconds_to_time_string( time_left_in_seconds) if sender.count != 0 and sender.count % 3600 == 0: self.elapsed_shift_time_in_hours += 1 self.update_progress_box() if time_left_in_seconds == 0: rumps.notification( title=self.config["app_title"], subtitle=self.config["timeout_message"], message='') self.stop_timer() self.stop_button.set_callback(None) else: self.stop_button.set_callback(self.stop_timer) self.app.title = self.progress_box + ' | ' + time_left_in_string sender.count += 1 def update_progress_box(self): self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self.shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours) * '◻︎' def start_timer(self, sender): if sender.title.lower().startswith(("start", "continue")): if sender.title == self.config["start"]: self.timer.count = 0 self.timer.end = self.shift_time_in_seconds sender.title = self.config["pause"] self.timer.start() else: sender.title = self.config["continue"] self.timer.stop() def stop_timer(self, sender=None): self.set_up_menu() self.stop_button.set_callback(None) self.start_pause_button.title = self.config["start"] def handle_shift_setting_button(self, sender): self.shift_setting_button_group.toggle(sender) selected_hours = int(match(r'^\d+\s{1}', sender.title)[0]) self.progress_box = "◻︎" * selected_hours # update empty progress box self.shift_time_in_seconds = selected_hours * 3600 # hours in seconds def handle_break_setting_button(self, sender): self.break_setting_button_group.toggle(sender) selected_minutes = int(match(r'^\d+\s{1}', sender.title)[0]) self.break_time_in_seconds = selected_minutes * 60 def run(self): self.app.run() if __name__ == "__main__": app = RepeatWorkBreak() app.run()
[ "from typing import List\nfrom re import match\nfrom utility import ButtonGroup\nimport rumps\n\n\nclass RepeatWorkBreak(rumps.App):\n def __init__(self):\n rumps.debug_mode(True)\n\n self.config = {\n \"app_title\": \"Repeat Work and Break\",\n \"start\": \"Start\",\n \"pause\": \"Pause Timer\",\n \"continue\": \"Continue Timer\",\n \"stop\": \"Stop Timer\",\n \"timeout_message\": \"Time is up! Take a break :)\",\n \"shift_time_in_seconds\": 60 * 60 * 1, # 60 seconds * 60 = 1 hour\n \"break_time_in_seconds\": 60 * 5,\n 'shift_setting_buttons': [\n {\n 'title': '1 hour',\n },\n {\n 'title': '4 hour',\n },\n {\n 'title': '8 hour',\n }\n ],\n 'break_setting_buttons': [\n {\n 'title': '5 minutes',\n },\n {\n 'title': '10 minutes',\n },\n {\n 'title': '15 minutes',\n }\n ],\n }\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(\n self.config['shift_setting_buttons'], callback=self.handle_shift_setting_button)\n self.break_setting_button_group = ButtonGroup(\n self.config['break_setting_buttons'], callback=self.handle_shift_setting_button)\n self.shift_time_in_seconds = self.config[\"shift_time_in_seconds\"]\n self.break_time_in_seconds = self.config[\"break_time_in_seconds\"]\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(\n title=self.config[\"start\"], callback=self.start_timer)\n self.stop_button = rumps.MenuItem(\n title=self.config[\"stop\"], callback=None)\n self.app.menu = [\n {\n 'Preferences':\n {\n \"Setting Shift\": self.shift_setting_button_group.buttons,\n \"Setting Break / hr\": self.break_setting_button_group.buttons,\n }\n },\n None,\n self.start_pause_button,\n self.stop_button,\n ]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) -> str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n\n return \"%d:%02d:%02d\" % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end - sender.count\n\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(\n title=self.config[\"app_title\"], subtitle=self.config[\"timeout_message\"], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self.shift_time_in_seconds // 3600 -\n self.elapsed_shift_time_in_hours) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith((\"start\", \"continue\")):\n if sender.title == self.config[\"start\"]:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config[\"pause\"]\n self.timer.start()\n else:\n sender.title = self.config[\"continue\"]\n self.timer.stop()\n\n def stop_timer(self, sender=None):\n self.set_up_menu()\n self.stop_button.set_callback(None)\n self.start_pause_button.title = self.config[\"start\"]\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match(r'^\\d+\\s{1}', sender.title)[0])\n self.progress_box = \"◻︎\" * selected_hours # update empty progress box\n self.shift_time_in_seconds = selected_hours * 3600 # hours in seconds\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match(r'^\\d+\\s{1}', sender.title)[0])\n self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\nif __name__ == \"__main__\":\n app = RepeatWorkBreak()\n app.run()\n", "from typing import List\nfrom re import match\nfrom utility import ButtonGroup\nimport rumps\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end - sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n\n def stop_timer(self, sender=None):\n self.set_up_menu()\n self.stop_button.set_callback(None)\n self.start_pause_button.title = self.config['start']\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\nif __name__ == '__main__':\n app = RepeatWorkBreak()\n app.run()\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end - sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n\n def stop_timer(self, sender=None):\n self.set_up_menu()\n self.stop_button.set_callback(None)\n self.start_pause_button.title = self.config['start']\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\nif __name__ == '__main__':\n app = RepeatWorkBreak()\n app.run()\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end - sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n\n def stop_timer(self, sender=None):\n self.set_up_menu()\n self.stop_button.set_callback(None)\n self.start_pause_button.title = self.config['start']\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end - sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n\n def stop_timer(self, sender=None):\n self.set_up_menu()\n self.stop_button.set_callback(None)\n self.start_pause_button.title = self.config['start']\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n <function token>\n\n def run(self):\n self.app.run()\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end - sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n <function token>\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n <function token>\n\n def run(self):\n self.app.run()\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n <function token>\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n <function token>\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n <function token>\n\n def run(self):\n self.app.run()\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n <function token>\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n <function token>\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n <function token>\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n <function token>\n\n def run(self):\n self.app.run()\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n <function token>\n <function token>\n <function token>\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n <function token>\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n <function token>\n\n def run(self):\n self.app.run()\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n <function token>\n <function token>\n <function token>\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n <function token>\n <function token>\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n <function token>\n\n def run(self):\n self.app.run()\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n <function token>\n <function token>\n <function token>\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n <function token>\n <function token>\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n <function token>\n <function token>\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n\n\nclass RepeatWorkBreak(rumps.App):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n", "<import token>\n<class token>\n<code token>\n" ]
false
361
8bc40ed4fe1091ecdb40cd55ff9cf53010078823
import requests import json data = json.load(open("dummy_data/data.json")) for one in data: print(one) r = requests.post("http://localhost:8080/sumari", json=one) print(r.text)
[ "import requests\nimport json\n\ndata = json.load(open(\"dummy_data/data.json\"))\n\nfor one in data:\n print(one)\n r = requests.post(\"http://localhost:8080/sumari\", json=one)\n print(r.text)\n", "import requests\nimport json\ndata = json.load(open('dummy_data/data.json'))\nfor one in data:\n print(one)\n r = requests.post('http://localhost:8080/sumari', json=one)\n print(r.text)\n", "<import token>\ndata = json.load(open('dummy_data/data.json'))\nfor one in data:\n print(one)\n r = requests.post('http://localhost:8080/sumari', json=one)\n print(r.text)\n", "<import token>\n<assignment token>\nfor one in data:\n print(one)\n r = requests.post('http://localhost:8080/sumari', json=one)\n print(r.text)\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
362
4e9a968842c2b3eca79690f0b56c8e176b203138
print((9*int(input())/5)+32)
[ "print((9*int(input())/5)+32)", "print(9 * int(input()) / 5 + 32)\n", "<code token>\n" ]
false
363
7a1bd2b4734527a414c6173ea8edb150221f8042
import numpy as np import pandas as pd from scipy.optimize import minimize from datetime import datetime import time from functions import weather_scraper def getData(): # # run weather_scraper.py to fetch new weather data # weather_scraper.getData() ## Read in csv file "weather_data.csv" weather_data = pd.read_csv("data/weather_data.csv") # Grab the current month & hour currentMonth = datetime.now().month currentHour = datetime.now().hour # Determine which month group the current month is [0,5] currentMonthGroup = currentMonth // 2 hoep_data = [] temp = weather_data.iloc[:,2] # Change hour string to number from 0-23 for i in range(len(temp)): weather_data.iloc[i,1] = (currentHour + i) % 24 # Convert temperature data to HOEP data if (currentMonthGroup == 0) : hoep_data = temp.apply(lambda x: (2.02887*x + 39.633)/100) elif (currentMonthGroup == 1): hoep_data = temp.apply(lambda x: (0.453122*x + 19.8276)/100) elif (currentMonthGroup == 2): hoep_data = temp.apply(lambda x: (1.13665*x - 11.0085)/100) elif (currentMonthGroup == 3): hoep_data = temp.apply(lambda x: (1.90245*x - 23.2826)/100) elif (currentMonthGroup == 4): hoep_data = temp.apply(lambda x: (1.39145*x - 8.97971)/100) else: hoep_data = temp.apply(lambda x: (1.72767*x + 21.3536)/100) # Load in the load_data load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine = 'openpyxl') # Create loading schedule based on current time of day and month load_sched = np.arange(48) for i in range(len(temp)): load_sched[i] = load_data.iloc[ weather_data.iloc[i,1] , currentMonthGroup] WMST = 0.003499 ## x[0:48] = PCEA ## x[48:96] = ESB start_time = time.time() # Constraints to ensure that ESB falls within limits def constraint1(x): for i in range(48): if (i == 0): x[48] = 0 else: x[48+i] = x[48+i-1] + x[i] return x[0:48] + x[48:96] def constraint2(x): for i in range(48): if (i == 0): x[48] = 0 else: x[48+i] = x[48+i-1] + x[i] return 10000 - (x[0:48]+ x[48:96]) power = ((-5000, 5000),) * 48 storage = ((0, 10000),) * 48 #Objective def MEC(x): # ( PDLL + PCEA ) x HOEP return sum(sum( (load_sched + np.array([x[0:48]])) * (np.array(hoep_data)+WMST) )) x0 = np.array([np.ones(48), np.ones(48)]) bounds = (power + storage) cons1 = {'type': 'ineq', 'fun': constraint1} cons2 = {'type': 'ineq', 'fun': constraint2} cons = ([cons1, cons2]) sol = minimize(MEC, x0, method='SLSQP',bounds=bounds,constraints=cons,options= {'maxiter':150,'disp':True}) input_var = {"EA_w_bill": round(sol.fun,2)} return input_var
[ "import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom datetime import datetime\nimport time\nfrom functions import weather_scraper\n\ndef getData():\n # # run weather_scraper.py to fetch new weather data\n # weather_scraper.getData()\n\n ## Read in csv file \"weather_data.csv\"\n weather_data = pd.read_csv(\"data/weather_data.csv\")\n\n # Grab the current month & hour\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n\n # Determine which month group the current month is [0,5]\n currentMonthGroup = currentMonth // 2\n\n hoep_data = []\n temp = weather_data.iloc[:,2]\n\n # Change hour string to number from 0-23\n for i in range(len(temp)): \n weather_data.iloc[i,1] = (currentHour + i) % 24\n\n # Convert temperature data to HOEP data\n if (currentMonthGroup == 0) :\n hoep_data = temp.apply(lambda x: (2.02887*x + 39.633)/100)\n elif (currentMonthGroup == 1):\n hoep_data = temp.apply(lambda x: (0.453122*x + 19.8276)/100)\n elif (currentMonthGroup == 2):\n hoep_data = temp.apply(lambda x: (1.13665*x - 11.0085)/100)\n elif (currentMonthGroup == 3):\n hoep_data = temp.apply(lambda x: (1.90245*x - 23.2826)/100)\n elif (currentMonthGroup == 4): \n hoep_data = temp.apply(lambda x: (1.39145*x - 8.97971)/100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767*x + 21.3536)/100)\n\n # Load in the load_data\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine = 'openpyxl')\n\n # Create loading schedule based on current time of day and month\n load_sched = np.arange(48)\n\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[ weather_data.iloc[i,1] , currentMonthGroup]\n\n WMST = 0.003499 \n\n ## x[0:48] = PCEA\n ## x[48:96] = ESB\n\n start_time = time.time()\n\n # Constraints to ensure that ESB falls within limits\n def constraint1(x):\n for i in range(48):\n if (i == 0):\n x[48] = 0\n else:\n x[48+i] = x[48+i-1] + x[i]\n return x[0:48] + x[48:96]\n \n def constraint2(x):\n for i in range(48):\n if (i == 0):\n x[48] = 0\n else:\n x[48+i] = x[48+i-1] + x[i]\n return 10000 - (x[0:48]+ x[48:96])\n \n\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n #Objective\n def MEC(x): # ( PDLL + PCEA ) x HOEP\n return sum(sum( (load_sched + np.array([x[0:48]])) * (np.array(hoep_data)+WMST) ))\n\n x0 = np.array([np.ones(48), np.ones(48)])\n\n bounds = (power + storage)\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n\n cons = ([cons1, cons2])\n\n sol = minimize(MEC, x0, method='SLSQP',bounds=bounds,constraints=cons,options= {'maxiter':150,'disp':True})\n\n input_var = {\"EA_w_bill\": round(sol.fun,2)}\n\n return input_var\n", "import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom datetime import datetime\nimport time\nfrom functions import weather_scraper\n\n\ndef getData():\n weather_data = pd.read_csv('data/weather_data.csv')\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n currentMonthGroup = currentMonth // 2\n hoep_data = []\n temp = weather_data.iloc[:, 2]\n for i in range(len(temp)):\n weather_data.iloc[i, 1] = (currentHour + i) % 24\n if currentMonthGroup == 0:\n hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)\n elif currentMonthGroup == 1:\n hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)\n elif currentMonthGroup == 2:\n hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)\n elif currentMonthGroup == 3:\n hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)\n elif currentMonthGroup == 4:\n hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=\n 'openpyxl')\n load_sched = np.arange(48)\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],\n currentMonthGroup]\n WMST = 0.003499\n start_time = time.time()\n\n def constraint1(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return x[0:48] + x[48:96]\n\n def constraint2(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return 10000 - (x[0:48] + x[48:96])\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n def MEC(x):\n return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(\n hoep_data) + WMST)))\n x0 = np.array([np.ones(48), np.ones(48)])\n bounds = power + storage\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n cons = [cons1, cons2]\n sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,\n options={'maxiter': 150, 'disp': True})\n input_var = {'EA_w_bill': round(sol.fun, 2)}\n return input_var\n", "<import token>\n\n\ndef getData():\n weather_data = pd.read_csv('data/weather_data.csv')\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n currentMonthGroup = currentMonth // 2\n hoep_data = []\n temp = weather_data.iloc[:, 2]\n for i in range(len(temp)):\n weather_data.iloc[i, 1] = (currentHour + i) % 24\n if currentMonthGroup == 0:\n hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)\n elif currentMonthGroup == 1:\n hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)\n elif currentMonthGroup == 2:\n hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)\n elif currentMonthGroup == 3:\n hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)\n elif currentMonthGroup == 4:\n hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=\n 'openpyxl')\n load_sched = np.arange(48)\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],\n currentMonthGroup]\n WMST = 0.003499\n start_time = time.time()\n\n def constraint1(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return x[0:48] + x[48:96]\n\n def constraint2(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return 10000 - (x[0:48] + x[48:96])\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n def MEC(x):\n return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(\n hoep_data) + WMST)))\n x0 = np.array([np.ones(48), np.ones(48)])\n bounds = power + storage\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n cons = [cons1, cons2]\n sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,\n options={'maxiter': 150, 'disp': True})\n input_var = {'EA_w_bill': round(sol.fun, 2)}\n return input_var\n", "<import token>\n<function token>\n" ]
false
364
1bdb19373960e4f63d80d6ab73ec3c0939e40b7f
import contextlib import dask import dask.array as da import packaging.version import pandas import six import sklearn SK_VERSION = packaging.version.parse(sklearn.__version__) DASK_VERSION = packaging.version.parse(dask.__version__) PANDAS_VERSION = packaging.version.parse(pandas.__version__) @contextlib.contextmanager def dummy_context(*args, **kwargs): yield if six.PY2: from collections import Mapping else: from collections.abc import Mapping # noqa if DASK_VERSION < packaging.version.parse("1.1.0"): blockwise = da.atop else: blockwise = da.blockwise
[ "import contextlib\n\nimport dask\nimport dask.array as da\nimport packaging.version\nimport pandas\nimport six\nimport sklearn\n\nSK_VERSION = packaging.version.parse(sklearn.__version__)\nDASK_VERSION = packaging.version.parse(dask.__version__)\nPANDAS_VERSION = packaging.version.parse(pandas.__version__)\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\nif six.PY2:\n from collections import Mapping\nelse:\n from collections.abc import Mapping # noqa\n\nif DASK_VERSION < packaging.version.parse(\"1.1.0\"):\n blockwise = da.atop\nelse:\n blockwise = da.blockwise\n", "import contextlib\nimport dask\nimport dask.array as da\nimport packaging.version\nimport pandas\nimport six\nimport sklearn\nSK_VERSION = packaging.version.parse(sklearn.__version__)\nDASK_VERSION = packaging.version.parse(dask.__version__)\nPANDAS_VERSION = packaging.version.parse(pandas.__version__)\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\nif six.PY2:\n from collections import Mapping\nelse:\n from collections.abc import Mapping\nif DASK_VERSION < packaging.version.parse('1.1.0'):\n blockwise = da.atop\nelse:\n blockwise = da.blockwise\n", "<import token>\nSK_VERSION = packaging.version.parse(sklearn.__version__)\nDASK_VERSION = packaging.version.parse(dask.__version__)\nPANDAS_VERSION = packaging.version.parse(pandas.__version__)\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\nif six.PY2:\n from collections import Mapping\nelse:\n from collections.abc import Mapping\nif DASK_VERSION < packaging.version.parse('1.1.0'):\n blockwise = da.atop\nelse:\n blockwise = da.blockwise\n", "<import token>\n<assignment token>\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\nif six.PY2:\n from collections import Mapping\nelse:\n from collections.abc import Mapping\nif DASK_VERSION < packaging.version.parse('1.1.0'):\n blockwise = da.atop\nelse:\n blockwise = da.blockwise\n", "<import token>\n<assignment token>\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<code token>\n" ]
false
365
a4f932a8566afe0265dc1057d0f6534a608697f7
""" LeetCode Problem: 242. Valid Anagram Link: https://leetcode.com/problems/valid-anagram/ Written by: Mostofa Adib Shakib Language: Python """ class Solution(object): def isAnagram(self, s, t): """ :type s: str :type t: str :rtype: bool """ length1 = len(s) length2 = len(t) if length1 != length2: return False s = sorted(s) #sorted the string in alphanumeric order t = sorted(t) #sorted the string in alphanumeric order for i in range(0, length1): if s[i] != t[i]: return False # return false if the two sorted strings are not the same. return True # if the sorted strings are same return True
[ "\"\"\"\nLeetCode Problem: 242. Valid Anagram\nLink: https://leetcode.com/problems/valid-anagram/\nWritten by: Mostofa Adib Shakib\nLanguage: Python\n\"\"\"\n\nclass Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n \n length1 = len(s)\n length2 = len(t)\n \n if length1 != length2:\n return False\n \n s = sorted(s) #sorted the string in alphanumeric order\n t = sorted(t) #sorted the string in alphanumeric order\n \n for i in range(0, length1):\n if s[i] != t[i]:\n return False # return false if the two sorted strings are not the same.\n\n return True # if the sorted strings are same return True", "<docstring token>\n\n\nclass Solution(object):\n\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n length1 = len(s)\n length2 = len(t)\n if length1 != length2:\n return False\n s = sorted(s)\n t = sorted(t)\n for i in range(0, length1):\n if s[i] != t[i]:\n return False\n return True\n", "<docstring token>\n\n\nclass Solution(object):\n <function token>\n", "<docstring token>\n<class token>\n" ]
false
366
6dd11f71e514a46462bf0b97ddac9ea474e86ad0
import os, glob import numpy as np from ..algorithms.utils import get_file_manager from ..algorithms.clustered_writes import * from ..exp_utils import create_empty_dir def test_get_entity_sizes(): # in C order bytes_per_voxel = 1 R = (10,9,10) cs = (5,3,2) partition = (2,3,5) bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition) assert bs == 5*3*2 assert brs == 5*3*2*5 assert bss == 5*3*2*5*3 def test_get_strategy(): # in C order bytes_per_voxel = 1 R = (20,9,10) cs = (5,3,2) partition = (4,3,5) bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition) test_case = { 5*2*3: 0, # 1 block 5*2*3*4: 0, # 4 blocks 5*2*3*5: 1, # 1 row 5*2*3*5*2: 1, # 2 rows 5*2*3*5*3: 2, # 1 slice 5*2*3*5*3*3: 2, # 3 slices 5*2*3*5*3*4: 2, # whole img 5*2*3*5*3*7: 2, # whole img (more mem than necessary) } for buffer_mem_size, expected in test_case.items(): strategy = get_strategy(buffer_mem_size, bs, brs, bss) assert strategy == expected def test_compute_buffers(): # in C order bytes_per_voxel = 1 R = (20,9,10) cs = (5,3,2) partition = (4,3,5) bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition) origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel test_case = { 5*2*3: 4*3*5, # 1 block 5*2*3*4: 4*3*2, # 4 blocks 5*2*3*5: 4*3, # 1 row 5*2*3*5*2: 4*2, # 2 rows 5*2*3*5*3: 4, # 1 slice 5*2*3*5*3*3: 2, # 3 slices 5*2*3*5*3*4: 1, # whole img 5*2*3*5*3*7: 1, # whole img (more mem than necessary) } for buffer_mem_size, expected in test_case.items(): strategy = get_strategy(buffer_mem_size, bs, brs, bss) buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel) # test number of buffers nb_buffers = len(buffers.values()) assert nb_buffers == expected def test_clustered_writes(): bpv = 1 R = (20,9,10) cs = (5,3,2) ff = 'HDF5' outdir_path = './outdir' test_case = [ 5*3*2, # 1 block 5*3*2*4, # 4 blocks 5*3*2*5, # 1 row 5*3*2*5*2, # 2 rows 5*3*2*5*3, # 1 slice 5*3*2*5*3*3, # 3 slices 5*3*2*5*3*4, # whole img 5*3*2*5*3*7, # whole img (more mem than necessary) ] nb_chunks = 4*3*5 # create input array origarr_filepath = './original_array.hdf5' data = np.random.normal(size=R) fm = get_file_manager(ff) if os.path.isfile(origarr_filepath): os.remove(origarr_filepath) fm.write(origarr_filepath, data, R, _slices=None) for m in test_case: create_empty_dir(outdir_path) clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path) workdir = os.getcwd() os.chdir(outdir_path) filenames = list() for filename in glob.glob("*.hdf5"): arr = fm.read_all(filename) assert arr.shape == cs filenames.append(filename) assert len(filenames) == nb_chunks os.chdir(workdir)
[ "import os, glob\nimport numpy as np\n\nfrom ..algorithms.utils import get_file_manager\nfrom ..algorithms.clustered_writes import *\nfrom ..exp_utils import create_empty_dir\n\n\ndef test_get_entity_sizes():\n # in C order\n bytes_per_voxel = 1\n R = (10,9,10)\n cs = (5,3,2)\n partition = (2,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n\n assert bs == 5*3*2\n assert brs == 5*3*2*5\n assert bss == 5*3*2*5*3\n\n\ndef test_get_strategy():\n # in C order\n bytes_per_voxel = 1\n R = (20,9,10)\n cs = (5,3,2)\n partition = (4,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n \n test_case = {\n 5*2*3: 0, # 1 block \n 5*2*3*4: 0, # 4 blocks \n 5*2*3*5: 1, # 1 row \n 5*2*3*5*2: 1, # 2 rows\n 5*2*3*5*3: 2, # 1 slice \n 5*2*3*5*3*3: 2, # 3 slices \n 5*2*3*5*3*4: 2, # whole img\n 5*2*3*5*3*7: 2, # whole img (more mem than necessary)\n }\n\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n # in C order\n bytes_per_voxel = 1\n R = (20,9,10)\n cs = (5,3,2)\n partition = (4,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel\n \n test_case = {\n 5*2*3: 4*3*5, # 1 block \n 5*2*3*4: 4*3*2, # 4 blocks \n 5*2*3*5: 4*3, # 1 row \n 5*2*3*5*2: 4*2, # 2 rows\n 5*2*3*5*3: 4, # 1 slice \n 5*2*3*5*3*3: 2, # 3 slices \n 5*2*3*5*3*4: 1, # whole img\n 5*2*3*5*3*7: 1, # whole img (more mem than necessary)\n }\n\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)\n\n # test number of buffers\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = (20,9,10)\n cs = (5,3,2)\n ff = 'HDF5'\n outdir_path = './outdir'\n\n test_case = [\n 5*3*2, # 1 block \n 5*3*2*4, # 4 blocks \n 5*3*2*5, # 1 row \n 5*3*2*5*2, # 2 rows\n 5*3*2*5*3, # 1 slice \n 5*3*2*5*3*3, # 3 slices \n 5*3*2*5*3*4, # whole img\n 5*3*2*5*3*7, # whole img (more mem than necessary)\n ]\n\n nb_chunks = 4*3*5\n\n # create input array\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n \n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob(\"*.hdf5\"):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n\n \n", "import os, glob\nimport numpy as np\nfrom ..algorithms.utils import get_file_manager\nfrom ..algorithms.clustered_writes import *\nfrom ..exp_utils import create_empty_dir\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n", "<import token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n", "<import token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\n<function token>\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n", "<import token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\n<function token>\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\n<function token>\n", "<import token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\n<function token>\n<function token>\n<function token>\n", "<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n" ]
false
367
c09c02a36a64e9522cfc8c0951bd6c98f404f09c
# Random number guessing game. # 10 July 20 # CTI-110 P5HW1 - Random Number # Thelma Majette import random randomNumber = random.randint (1,100) # main function def main(): # Create a variable to control the loop. keep_going = 'y' while keep_going == 'y': # Ask user for a number () guess = int(input('\nGuess a number between 1 and 100: ')) # Perform the selected action. if guess > randomNumber: print ('\nToo high, try again.' ) elif guess < randomNumber: print ('\nToo low, try again' ) else: print ('\nCongratulations, you guessed the correct number!') keep_going ='n' main ()
[ "# Random number guessing game.\r\n# 10 July 20\r\n# CTI-110 P5HW1 - Random Number\r\n# Thelma Majette\r\n\r\nimport random\r\n\r\nrandomNumber = random.randint (1,100)\r\n\r\n# main function\r\ndef main():\r\n\r\n # Create a variable to control the loop.\r\n keep_going = 'y'\r\n while keep_going == 'y':\r\n\r\n # Ask user for a number ()\r\n guess = int(input('\\nGuess a number between 1 and 100: '))\r\n\r\n # Perform the selected action.\r\n if guess > randomNumber:\r\n print ('\\nToo high, try again.' )\r\n elif guess < randomNumber:\r\n print ('\\nToo low, try again' )\r\n else:\r\n print ('\\nCongratulations, you guessed the correct number!')\r\n keep_going ='n'\r\n \r\n \r\n \r\nmain () \r\n", "import random\nrandomNumber = random.randint(1, 100)\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n", "<import token>\nrandomNumber = random.randint(1, 100)\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n", "<import token>\n<assignment token>\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n", "<import token>\n<assignment token>\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\n<code token>\n", "<import token>\n<assignment token>\n<function token>\n<code token>\n" ]
false
368
798ddd4a6e4febb4664bf1c973877628d1a45c71
from django.conf.urls import patterns, include, url # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() urlpatterns = patterns('accounts.views', url(r'^$', 'home', name='home'), url(r'^login/$', 'login', name='login'), url(r'^logout/$', 'logout', name='logout'), url(r'^register/$', 'register', name='register'), url(r'^dashboard/', 'dashboard', name='dashboard'), url(r'^rewards/', 'rewards', name='rewards'), url(r'get_all_data/', 'get_all_data', name='get_all_data'), )
[ "from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('accounts.views',\n url(r'^$', 'home', name='home'),\n url(r'^login/$', 'login', name='login'),\n url(r'^logout/$', 'logout', name='logout'),\n url(r'^register/$', 'register', name='register'),\n url(r'^dashboard/', 'dashboard', name='dashboard'),\n url(r'^rewards/', 'rewards', name='rewards'),\n url(r'get_all_data/', 'get_all_data', name='get_all_data'),\n)\n", "from django.conf.urls import patterns, include, url\nurlpatterns = patterns('accounts.views', url('^$', 'home', name='home'),\n url('^login/$', 'login', name='login'), url('^logout/$', 'logout', name\n ='logout'), url('^register/$', 'register', name='register'), url(\n '^dashboard/', 'dashboard', name='dashboard'), url('^rewards/',\n 'rewards', name='rewards'), url('get_all_data/', 'get_all_data', name=\n 'get_all_data'))\n", "<import token>\nurlpatterns = patterns('accounts.views', url('^$', 'home', name='home'),\n url('^login/$', 'login', name='login'), url('^logout/$', 'logout', name\n ='logout'), url('^register/$', 'register', name='register'), url(\n '^dashboard/', 'dashboard', name='dashboard'), url('^rewards/',\n 'rewards', name='rewards'), url('get_all_data/', 'get_all_data', name=\n 'get_all_data'))\n", "<import token>\n<assignment token>\n" ]
false
369
3340277df91f1421dab8d204eddce65b4604432b
from django.shortcuts import render, redirect from .models import Courses from django.views.generic import CreateView, ListView, UpdateView, DeleteView from .forms import CourceCreateForm from django.urls import reverse_lazy from django.urls import reverse class CourceListView(ListView): model = Courses template_name = 'cources/cource_list.html' context_object_name = 'cources' class CourceCreateView(CreateView): template_name = 'cources/create_cource.html' form_class = CourceCreateForm success_url = reverse_lazy('cources:cource_list') class CourceUpdateView(UpdateView): model = Courses form_class = CourceCreateForm template_name = 'cources/course_update.html' success_url = reverse_lazy('cources:cource_list') def DeleteView(request, pk): cource = Courses.objects.filter(pk=pk) cource.delete() return redirect(reverse('cources:cource_list'))
[ "from django.shortcuts import render, redirect\nfrom .models import Courses\nfrom django.views.generic import CreateView, ListView, UpdateView, DeleteView\nfrom .forms import CourceCreateForm\nfrom django.urls import reverse_lazy\nfrom django.urls import reverse\n\nclass CourceListView(ListView):\n model = Courses\n template_name = 'cources/cource_list.html'\n context_object_name = 'cources'\n\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\ndef DeleteView(request, pk):\n cource = Courses.objects.filter(pk=pk)\n cource.delete()\n\n return redirect(reverse('cources:cource_list'))\n\n", "from django.shortcuts import render, redirect\nfrom .models import Courses\nfrom django.views.generic import CreateView, ListView, UpdateView, DeleteView\nfrom .forms import CourceCreateForm\nfrom django.urls import reverse_lazy\nfrom django.urls import reverse\n\n\nclass CourceListView(ListView):\n model = Courses\n template_name = 'cources/cource_list.html'\n context_object_name = 'cources'\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\ndef DeleteView(request, pk):\n cource = Courses.objects.filter(pk=pk)\n cource.delete()\n return redirect(reverse('cources:cource_list'))\n", "<import token>\n\n\nclass CourceListView(ListView):\n model = Courses\n template_name = 'cources/cource_list.html'\n context_object_name = 'cources'\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\ndef DeleteView(request, pk):\n cource = Courses.objects.filter(pk=pk)\n cource.delete()\n return redirect(reverse('cources:cource_list'))\n", "<import token>\n\n\nclass CourceListView(ListView):\n model = Courses\n template_name = 'cources/cource_list.html'\n context_object_name = 'cources'\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\n<function token>\n", "<import token>\n\n\nclass CourceListView(ListView):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\n<function token>\n", "<import token>\n<class token>\n\n\nclass CourceCreateView(CreateView):\n template_name = 'cources/create_cource.html'\n form_class = CourceCreateForm\n success_url = reverse_lazy('cources:cource_list')\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\n<function token>\n", "<import token>\n<class token>\n\n\nclass CourceCreateView(CreateView):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\n<function token>\n", "<import token>\n<class token>\n<class token>\n\n\nclass CourceUpdateView(UpdateView):\n model = Courses\n form_class = CourceCreateForm\n template_name = 'cources/course_update.html'\n success_url = reverse_lazy('cources:cource_list')\n\n\n<function token>\n", "<import token>\n<class token>\n<class token>\n\n\nclass CourceUpdateView(UpdateView):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<function token>\n", "<import token>\n<class token>\n<class token>\n<class token>\n<function token>\n" ]
false
370
66444047f9e5eea845c8ac2dbaaf16fc2914d6ec
if answ[1] == 'дата': apisay(datetime.date.today(),toho,torep)
[ "if answ[1] == 'дата':\n\tapisay(datetime.date.today(),toho,torep)", "if answ[1] == 'дата':\n apisay(datetime.date.today(), toho, torep)\n", "<code token>\n" ]
false
371
d2f6d7c779d3d6e61d9da7af01a2931fdabec828
import random choices = ['X', 'O'] try: # Choice of X-O given to the player player_sym = input("Choose 'X' or 'O' : ") # raising an exception if the variable is not X or O if player_sym!='X' and player_sym!='O': raise Exception("Symbol not found") except Exception as e: print(e.args) else: # Allotting the other one as the computer symbol choices.remove(player_sym) comp_sym = choices[0] player_dict = {player_sym:'Player', comp_sym:'Computer'} # creating the board board = [' ']*9 gameEnd = False # to track when the game ends unmarked = [i for i in range(9)] # to track all the blank boxes left # gameOver function check if the game already has a winner def gameOver(board, symbol): # below is the sequence of all the possible winning combinations if board[0]==board[3]==board[6]==symbol or board[1]==board[7]==board[4]==symbol or board[2]==board[5]==board[8]==symbol or board[0]==board[1]==board[2]==symbol or board[5]==board[3]==board[4]==symbol or board[6]==board[7]==board[8]==symbol or board[2]==board[4]==board[6]==symbol or board[0]==board[4]==board[8]==symbol: # if there is a pattern match the game is over hence return True return True # function for marking the box with the symbol def mark(pos, symbol): board[pos] = symbol unmarked.remove(pos) # Used it for debugging : print(f"Unmarked : {unmarked}") # function to display the board at a particular time def displayBoard(): for i in range(len(board)): # formatting the output for the middle elements if i==1 or i==4 or i==7: print(f'|{board[i]}|', end=' ') elif i==2 or i==5: print(f'{board[i]}\n--------') # marks the end of a line and hence bifurcates two lines else: print(f'{board[i]}', end=' ') if __name__== "__main__": # this is where the game starts while not gameEnd: # loop until game ends try: player_pos = int(input("\n\nWhere would you mark? ")) # check if position index is on the board and is available for marking else raise Exception if player_pos<0 or player_pos>8 or (player_pos not in unmarked): raise Exception("Position out of Board") break except Exception as e: print(e.args) else: mark(player_pos, player_sym) # check if the game has already ended and if yes, declare the player as winner if gameOver(board, player_sym): displayBoard() print("\n\nPlayer Won!!!") break # computer will mark on some random square that is not marked yet comp_pos = unmarked[random.randint(0, len(unmarked)-1)] mark(comp_pos, comp_sym) # check if the game has already ended and if yes, declare the computer as winner if gameOver(board, comp_sym): displayBoard() print("\n\nComputer WON!!!") break # display the board after each iteration displayBoard() # marks the end of the game print("GAME OVER")
[ "import random\n\nchoices = ['X', 'O']\ntry:\n# Choice of X-O given to the player\n player_sym = input(\"Choose 'X' or 'O' : \")\n# raising an exception if the variable is not X or O\n if player_sym!='X' and player_sym!='O':\n raise Exception(\"Symbol not found\")\nexcept Exception as e:\n print(e.args)\nelse:\n# Allotting the other one as the computer symbol\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym:'Player', comp_sym:'Computer'}\n \n# creating the board\nboard = [' ']*9\ngameEnd = False # to track when the game ends\nunmarked = [i for i in range(9)] # to track all the blank boxes left\n\n\n\n# gameOver function check if the game already has a winner\ndef gameOver(board, symbol):\n# below is the sequence of all the possible winning combinations \n if board[0]==board[3]==board[6]==symbol or board[1]==board[7]==board[4]==symbol or board[2]==board[5]==board[8]==symbol or board[0]==board[1]==board[2]==symbol or board[5]==board[3]==board[4]==symbol or board[6]==board[7]==board[8]==symbol or board[2]==board[4]==board[6]==symbol or board[0]==board[4]==board[8]==symbol:\n# if there is a pattern match the game is over hence return True\n return True\n\n\n\n# function for marking the box with the symbol\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n# Used it for debugging : print(f\"Unmarked : {unmarked}\")\n\n\n\n# function to display the board at a particular time\ndef displayBoard():\n for i in range(len(board)):\n# formatting the output for the middle elements\n if i==1 or i==4 or i==7:\n print(f'|{board[i]}|', end=' ')\n elif i==2 or i==5:\n print(f'{board[i]}\\n--------') # marks the end of a line and hence bifurcates two lines\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__== \"__main__\":\n # this is where the game starts \n while not gameEnd: # loop until game ends\n try:\n player_pos = int(input(\"\\n\\nWhere would you mark? \"))\n # check if position index is on the board and is available for marking else raise Exception\n if player_pos<0 or player_pos>8 or (player_pos not in unmarked): \n raise Exception(\"Position out of Board\")\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n \n # check if the game has already ended and if yes, declare the player as winner\n if gameOver(board, player_sym): \n displayBoard()\n print(\"\\n\\nPlayer Won!!!\")\n break\n \n # computer will mark on some random square that is not marked yet\n comp_pos = unmarked[random.randint(0, len(unmarked)-1)]\n mark(comp_pos, comp_sym)\n \n # check if the game has already ended and if yes, declare the computer as winner\n if gameOver(board, comp_sym): \n displayBoard()\n print(\"\\n\\nComputer WON!!!\")\n break\n \n # display the board after each iteration\n displayBoard()\n \n # marks the end of the game\n print(\"GAME OVER\")", "import random\nchoices = ['X', 'O']\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\nboard = [' '] * 9\ngameEnd = False\nunmarked = [i for i in range(9)]\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n", "<import token>\nchoices = ['X', 'O']\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\nboard = [' '] * 9\ngameEnd = False\nunmarked = [i for i in range(9)]\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n", "<import token>\n<assignment token>\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\n<assignment token>\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\n<function token>\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n" ]
false
372
b4c6075aabe833f6fe23471f608d928edd25ef63
from .base import paw_test class warning_test(paw_test): def test_warning_badchars(self): self.paw.cset_lookup(self.badchar) self.assertEqual(1, self.paw.wcount)
[ "from .base import paw_test\r\n\r\n\r\nclass warning_test(paw_test):\r\n def test_warning_badchars(self):\r\n self.paw.cset_lookup(self.badchar)\r\n self.assertEqual(1, self.paw.wcount)\r\n", "from .base import paw_test\n\n\nclass warning_test(paw_test):\n\n def test_warning_badchars(self):\n self.paw.cset_lookup(self.badchar)\n self.assertEqual(1, self.paw.wcount)\n", "<import token>\n\n\nclass warning_test(paw_test):\n\n def test_warning_badchars(self):\n self.paw.cset_lookup(self.badchar)\n self.assertEqual(1, self.paw.wcount)\n", "<import token>\n\n\nclass warning_test(paw_test):\n <function token>\n", "<import token>\n<class token>\n" ]
false
373
24c9b562411a63f0d3f2ee509bb60dafe7fbecd1
import os from flask import Flask # from flask_login import LoginManager from flask_sqlalchemy import SQLAlchemy # from flask_bcrypt import Bcrypt from flask_wtf.csrf import CSRFProtect app = Flask(__name__) csrf = CSRFProtect(app) # bcrypt = Bcrypt(app) app.config['SECRET_KEY'] = 'v\xf9\xf7\x11\x13\x18\xfaMYp\xed_\xe8\xc9w\x06\x8e\xf0f\xd2\xba\xfd\x8c\xda' app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://vwfeollskqmjyw:1d738da99074015b148d72cfd94ea584dcb39e81c1bb197fb9da65455c756b0f@ec2-50-17-227-28.compute-1.amazonaws.com:5432/dc8lr6j69aeqjt' #app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql: //infoproj2:info3180@localhost/infoproj2' PRO_PIC_UPLOAD_FOLDER = "./app/static/profile_photos" POSTS_UPLOAD_FOLDER = "./app/static/posts_photos" app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # added just to suppress a warning db = SQLAlchemy(app) #Flask-Login LoginManager # login_manager = LoginManager() # login_manager.init_app(app) # login_manager.login_view = 'login' app.config.from_object(__name__) from app import views,models #intense-basin-58864 #name of project on heroku
[ "import os\nfrom flask import Flask\n# from flask_login import LoginManager\nfrom flask_sqlalchemy import SQLAlchemy\n# from flask_bcrypt import Bcrypt\nfrom flask_wtf.csrf import CSRFProtect\n\napp = Flask(__name__)\ncsrf = CSRFProtect(app)\n# bcrypt = Bcrypt(app)\n\napp.config['SECRET_KEY'] = 'v\\xf9\\xf7\\x11\\x13\\x18\\xfaMYp\\xed_\\xe8\\xc9w\\x06\\x8e\\xf0f\\xd2\\xba\\xfd\\x8c\\xda'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://vwfeollskqmjyw:1d738da99074015b148d72cfd94ea584dcb39e81c1bb197fb9da65455c756b0f@ec2-50-17-227-28.compute-1.amazonaws.com:5432/dc8lr6j69aeqjt'\n\n#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql: //infoproj2:info3180@localhost/infoproj2' \n\nPRO_PIC_UPLOAD_FOLDER = \"./app/static/profile_photos\"\nPOSTS_UPLOAD_FOLDER = \"./app/static/posts_photos\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # added just to suppress a warning\n\ndb = SQLAlchemy(app)\n\n#Flask-Login LoginManager\n# login_manager = LoginManager()\n# login_manager.init_app(app)\n# login_manager.login_view = 'login'\n\napp.config.from_object(__name__)\nfrom app import views,models\n#intense-basin-58864 #name of project on heroku", "import os\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf.csrf import CSRFProtect\napp = Flask(__name__)\ncsrf = CSRFProtect(app)\napp.config['SECRET_KEY'] = 'vù÷\\x11\\x13\\x18úMYpí_èÉw\\x06\\x8eðfÒºý\\x8cÚ'\napp.config['SQLALCHEMY_DATABASE_URI'] = (\n 'postgres://vwfeollskqmjyw:1d738da99074015b148d72cfd94ea584dcb39e81c1bb197fb9da65455c756b0f@ec2-50-17-227-28.compute-1.amazonaws.com:5432/dc8lr6j69aeqjt'\n )\nPRO_PIC_UPLOAD_FOLDER = './app/static/profile_photos'\nPOSTS_UPLOAD_FOLDER = './app/static/posts_photos'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\ndb = SQLAlchemy(app)\napp.config.from_object(__name__)\nfrom app import views, models\n", "<import token>\napp = Flask(__name__)\ncsrf = CSRFProtect(app)\napp.config['SECRET_KEY'] = 'vù÷\\x11\\x13\\x18úMYpí_èÉw\\x06\\x8eðfÒºý\\x8cÚ'\napp.config['SQLALCHEMY_DATABASE_URI'] = (\n 'postgres://vwfeollskqmjyw:1d738da99074015b148d72cfd94ea584dcb39e81c1bb197fb9da65455c756b0f@ec2-50-17-227-28.compute-1.amazonaws.com:5432/dc8lr6j69aeqjt'\n )\nPRO_PIC_UPLOAD_FOLDER = './app/static/profile_photos'\nPOSTS_UPLOAD_FOLDER = './app/static/posts_photos'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\ndb = SQLAlchemy(app)\napp.config.from_object(__name__)\n<import token>\n", "<import token>\n<assignment token>\napp.config.from_object(__name__)\n<import token>\n", "<import token>\n<assignment token>\n<code token>\n<import token>\n" ]
false
374
14a357f3dfb3d59f1d8cfd566edeaf8b0e5bb56d
#!/usr/bin/env python import rospy from geometry_msgs.msg import Twist from sensor_msgs.msg import Joy import serial from sys import platform if platform == "linux" or platform == "linux2": ser = serial.Serial('/dev/ttyACM0') elif platform == "darwin": pass elif platform == "win32": # Windows... ser = serial.Serial('COM16') """ In this test code we are testing basic vehicle control over the network we use ROS middleware to send the control commands This script runs at the remote driver end. Receives joystick messages (subscribed to Joy topic) then converts the joystick inputs into commands WE ARE NOT USING THIS METHOD NOW --- WE HAVE SEPERATED OUT ALL THE STREAMS FROM THE JOYSTICK """ oldvar = 0 first_a = 0 first_d = 0 # Configuatrion tuned for CAR in LOW speed base_throttle = 5500 peak_throttle = 6500 base_brake = 450 peak_brake = 600 button = 0 def callback(data): global first_a global first_d global oldvar global base_throttle global peak_throttle global base_brake global peak_brake global button # print data axis1 = -data.axes[1] axis3 = -data.axes[3] # in logitech axis 3 is axis 4 confirm with ashish button1 = data.buttons[1] button4 = data.buttons[4] button5 = data.buttons[5] button_ = button1+button4+button5 if axis1 > 0.1: bval = int((axis1) * (peak_brake - base_brake) + base_brake) print(bval) ser.write(str(bval).encode('utf-8')) ser.write("a".encode('utf-8')) #### ser.write("4000a".encode('utf-8')) #throttle released on braking print("Brake") elif (axis1 < -0.1 and axis3 < 0.1): tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle - base_throttle) * 0.5 + base_throttle) if (abs(tval - oldvar) > 5): #print(tval) ser.write(str(tval).encode('utf-8')) ser.write("a".encode('utf-8')) ser.write("450a".encode('utf-8')) # brake released on acceleration print("Throttle") oldvar = tval elif (axis1 > -0.1 and axis1 < 0.1): ser.write("4000a".encode('utf-8')) ser.write("450a".encode('utf-8')) # brake released print("Zero Throttle") print (axis1) print (axis3) if button1 == 1: print("Emergency Brake") ser.write("4600a".encode('utf-8')) # throttle released ser.write("600a".encode('utf-8')) # brake engaged if (button4 and button5 == 0): if (first_a == 0): ser.write("1000a".encode('utf-8')) print("Joystick button 4 pressed.") first_a = 1 if (button5 and button4 == 0): if (first_d == 0): ser.write("2000a".encode('utf-8')) print("Joystick button 5 pressed.") first_d = 1 if(button-button_!= 0): if(button4 == 0): first_a = 0 if(button5 == 0): first_d = 0 ser.write("3000a".encode('utf-8')) print("Joystick button released.") button = button_ # Intializes everything def start(): rospy.Subscriber("joy", Joy, callback) # starts the node rospy.init_node('Joy2Turtle') rospy.spin() if __name__ == '__main__': start()
[ "#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\n\nimport serial\nfrom sys import platform\nif platform == \"linux\" or platform == \"linux2\":\n ser = serial.Serial('/dev/ttyACM0')\nelif platform == \"darwin\":\n pass\nelif platform == \"win32\":\n # Windows...\n ser = serial.Serial('COM16')\n\"\"\"\nIn this test code we are testing basic vehicle control over the network\nwe use ROS middleware to send the control commands \nThis script runs at the remote driver end. \nReceives joystick messages (subscribed to Joy topic)\nthen converts the joystick inputs into commands\n\nWE ARE NOT USING THIS METHOD NOW \n--- WE HAVE SEPERATED OUT ALL THE STREAMS FROM THE JOYSTICK\n\n\"\"\"\n\noldvar = 0\nfirst_a = 0\nfirst_d = 0\n# Configuatrion tuned for CAR in LOW speed\nbase_throttle = 5500\npeak_throttle = 6500\nbase_brake = 450\npeak_brake = 600\nbutton = 0\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n # print data\n axis1 = -data.axes[1]\n axis3 = -data.axes[3] # in logitech axis 3 is axis 4 confirm with ashish\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n\n button_ = button1+button4+button5\n\n if axis1 > 0.1:\n bval = int((axis1) * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write(\"a\".encode('utf-8'))\n #### ser.write(\"4000a\".encode('utf-8')) #throttle released on braking\n print(\"Brake\")\n elif (axis1 < -0.1 and axis3 < 0.1):\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle - base_throttle) * 0.5 + base_throttle)\n if (abs(tval - oldvar) > 5):\n #print(tval)\n ser.write(str(tval).encode('utf-8'))\n ser.write(\"a\".encode('utf-8'))\n ser.write(\"450a\".encode('utf-8')) # brake released on acceleration\n print(\"Throttle\")\n oldvar = tval\n elif (axis1 > -0.1 and axis1 < 0.1):\n ser.write(\"4000a\".encode('utf-8'))\n ser.write(\"450a\".encode('utf-8')) # brake released\n print(\"Zero Throttle\")\n print (axis1)\n print (axis3)\n\n if button1 == 1:\n print(\"Emergency Brake\")\n ser.write(\"4600a\".encode('utf-8')) # throttle released\n ser.write(\"600a\".encode('utf-8')) # brake engaged\n\n if (button4 and button5 == 0):\n if (first_a == 0):\n ser.write(\"1000a\".encode('utf-8'))\n print(\"Joystick button 4 pressed.\")\n first_a = 1\n if (button5 and button4 == 0):\n if (first_d == 0):\n ser.write(\"2000a\".encode('utf-8'))\n print(\"Joystick button 5 pressed.\")\n first_d = 1\n\n if(button-button_!= 0):\n if(button4 == 0):\n first_a = 0\n if(button5 == 0):\n first_d = 0\n ser.write(\"3000a\".encode('utf-8'))\n print(\"Joystick button released.\")\n button = button_\n\n# Intializes everything\ndef start():\n rospy.Subscriber(\"joy\", Joy, callback)\n # starts the node\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\nif __name__ == '__main__':\n start()\n", "import rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\nimport serial\nfrom sys import platform\nif platform == 'linux' or platform == 'linux2':\n ser = serial.Serial('/dev/ttyACM0')\nelif platform == 'darwin':\n pass\nelif platform == 'win32':\n ser = serial.Serial('COM16')\n<docstring token>\noldvar = 0\nfirst_a = 0\nfirst_d = 0\nbase_throttle = 5500\npeak_throttle = 6500\nbase_brake = 450\npeak_brake = 600\nbutton = 0\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n print('Joystick button released.')\n button = button_\n\n\ndef start():\n rospy.Subscriber('joy', Joy, callback)\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\nif __name__ == '__main__':\n start()\n", "<import token>\nif platform == 'linux' or platform == 'linux2':\n ser = serial.Serial('/dev/ttyACM0')\nelif platform == 'darwin':\n pass\nelif platform == 'win32':\n ser = serial.Serial('COM16')\n<docstring token>\noldvar = 0\nfirst_a = 0\nfirst_d = 0\nbase_throttle = 5500\npeak_throttle = 6500\nbase_brake = 450\npeak_brake = 600\nbutton = 0\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n print('Joystick button released.')\n button = button_\n\n\ndef start():\n rospy.Subscriber('joy', Joy, callback)\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\nif __name__ == '__main__':\n start()\n", "<import token>\nif platform == 'linux' or platform == 'linux2':\n ser = serial.Serial('/dev/ttyACM0')\nelif platform == 'darwin':\n pass\nelif platform == 'win32':\n ser = serial.Serial('COM16')\n<docstring token>\n<assignment token>\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n print('Joystick button released.')\n button = button_\n\n\ndef start():\n rospy.Subscriber('joy', Joy, callback)\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\nif __name__ == '__main__':\n start()\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n print('Joystick button released.')\n button = button_\n\n\ndef start():\n rospy.Subscriber('joy', Joy, callback)\n rospy.init_node('Joy2Turtle')\n rospy.spin()\n\n\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef callback(data):\n global first_a\n global first_d\n global oldvar\n global base_throttle\n global peak_throttle\n global base_brake\n global peak_brake\n global button\n axis1 = -data.axes[1]\n axis3 = -data.axes[3]\n button1 = data.buttons[1]\n button4 = data.buttons[4]\n button5 = data.buttons[5]\n button_ = button1 + button4 + button5\n if axis1 > 0.1:\n bval = int(axis1 * (peak_brake - base_brake) + base_brake)\n print(bval)\n ser.write(str(bval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n print('Brake')\n elif axis1 < -0.1 and axis3 < 0.1:\n tval = int((axis1 * -1 + axis3 * -1) * (peak_throttle -\n base_throttle) * 0.5 + base_throttle)\n if abs(tval - oldvar) > 5:\n ser.write(str(tval).encode('utf-8'))\n ser.write('a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Throttle')\n oldvar = tval\n elif axis1 > -0.1 and axis1 < 0.1:\n ser.write('4000a'.encode('utf-8'))\n ser.write('450a'.encode('utf-8'))\n print('Zero Throttle')\n print(axis1)\n print(axis3)\n if button1 == 1:\n print('Emergency Brake')\n ser.write('4600a'.encode('utf-8'))\n ser.write('600a'.encode('utf-8'))\n if button4 and button5 == 0:\n if first_a == 0:\n ser.write('1000a'.encode('utf-8'))\n print('Joystick button 4 pressed.')\n first_a = 1\n if button5 and button4 == 0:\n if first_d == 0:\n ser.write('2000a'.encode('utf-8'))\n print('Joystick button 5 pressed.')\n first_d = 1\n if button - button_ != 0:\n if button4 == 0:\n first_a = 0\n if button5 == 0:\n first_d = 0\n ser.write('3000a'.encode('utf-8'))\n print('Joystick button released.')\n button = button_\n\n\n<function token>\n<code token>\n", "<import token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n" ]
false
375
1ef9df43725196904ec6c0c881f4a1204174b176
import requests, shutil, os, glob from zipfile import ZipFile import pandas as pd from xlrd import open_workbook import csv # zipfilename = 'desiya_hotels' # try: # # downloading zip file # r = requests.get('http://staticstore.travelguru.com/testdump/1300001176/Excel.zip', auth=('testdump', 'testdump'), verify=False,stream=True) #Note web_link is https:// # r.raw.decode_content = True # with open(os.path.join(os.path.dirname(__file__), 'storage/{}.zip'.format(zipfilename)), 'wb') as f: # shutil.copyfileobj(r.raw, f) # # #extracting zip file as xls file # with ZipFile(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/desiya*.zip'))[0], 'r') as zip: # zip.extractall(os.path.join(os.path.dirname(__file__), 'storage/')) # #Rename xls file name as "desiya_hotels" # if glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[0-9].xls')): # for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[a-zA-z].xls')): # os.remove(filename) # os.rename(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[0-9].xls'))[0], os.path.join(os.path.dirname(__file__),'storage/{}.xls'.format(zipfilename))) # else: # print('unzipped xls file is not found in storare folder') # except Exception as e: # print("Error while downloading zip file") #read xls file # xls = pd.ExcelFile(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/desiya*.xls'))[0]) # df = pd.read_excel(xls, sheet_name=0, index_col=None) # print(df['Name']) # print(df.head(5)) # for index, row in df.iterrows(): # print(index, row[3]) #convert xls to csvc # df.to_csv(os.path.join(os.path.dirname(__file__),'storage/{}'.format('robot.csv')), encoding='utf-8', index=False) #convert xls file to csv using xlrd module xlsfile = glob.glob(os.path.join(os.path.dirname(__file__), 'storage/robot*.xls'))[0] wb = open_workbook(xlsfile) sheet = wb.sheet_by_name('robot_list') with open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'), "w") as file: writer = csv.writer(file, delimiter=",") headers = [cell.value for cell in sheet.row(0)] writer.writerow(headers) for i in range(1, sheet.nrows): rowvalue_list = [str(cell.value).strip() if cell.value else None for cell in sheet.row(i)] writer.writerow(rowvalue_list)
[ "\n\nimport requests, shutil, os, glob\nfrom zipfile import ZipFile\nimport pandas as pd\nfrom xlrd import open_workbook\nimport csv\n\n# zipfilename = 'desiya_hotels'\n# try:\n# # downloading zip file\n# r = requests.get('http://staticstore.travelguru.com/testdump/1300001176/Excel.zip', auth=('testdump', 'testdump'), verify=False,stream=True) #Note web_link is https://\n# r.raw.decode_content = True\n# with open(os.path.join(os.path.dirname(__file__), 'storage/{}.zip'.format(zipfilename)), 'wb') as f:\n# shutil.copyfileobj(r.raw, f)\n#\n# #extracting zip file as xls file\n# with ZipFile(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/desiya*.zip'))[0], 'r') as zip:\n# zip.extractall(os.path.join(os.path.dirname(__file__), 'storage/'))\n# #Rename xls file name as \"desiya_hotels\"\n# if glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[0-9].xls')):\n# for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[a-zA-z].xls')):\n# os.remove(filename)\n# os.rename(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/*[0-9].xls'))[0], os.path.join(os.path.dirname(__file__),'storage/{}.xls'.format(zipfilename)))\n# else:\n# print('unzipped xls file is not found in storare folder')\n# except Exception as e:\n# print(\"Error while downloading zip file\")\n\n#read xls file\n# xls = pd.ExcelFile(glob.glob(os.path.join(os.path.dirname(__file__), 'storage/desiya*.xls'))[0])\n# df = pd.read_excel(xls, sheet_name=0, index_col=None)\n# print(df['Name'])\n# print(df.head(5))\n# for index, row in df.iterrows():\n# print(index, row[3])\n\n#convert xls to csvc\n# df.to_csv(os.path.join(os.path.dirname(__file__),'storage/{}'.format('robot.csv')), encoding='utf-8', index=False)\n\n\n#convert xls file to csv using xlrd module\nxlsfile = glob.glob(os.path.join(os.path.dirname(__file__), 'storage/robot*.xls'))[0]\nwb = open_workbook(xlsfile)\nsheet = wb.sheet_by_name('robot_list')\nwith open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'), \"w\") as file:\n writer = csv.writer(file, delimiter=\",\")\n headers = [cell.value for cell in sheet.row(0)]\n writer.writerow(headers)\n for i in range(1, sheet.nrows):\n rowvalue_list = [str(cell.value).strip() if cell.value else None for cell in sheet.row(i)]\n writer.writerow(rowvalue_list)\n\n\n\n\n\n\n", "import requests, shutil, os, glob\nfrom zipfile import ZipFile\nimport pandas as pd\nfrom xlrd import open_workbook\nimport csv\nxlsfile = glob.glob(os.path.join(os.path.dirname(__file__),\n 'storage/robot*.xls'))[0]\nwb = open_workbook(xlsfile)\nsheet = wb.sheet_by_name('robot_list')\nwith open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'),\n 'w') as file:\n writer = csv.writer(file, delimiter=',')\n headers = [cell.value for cell in sheet.row(0)]\n writer.writerow(headers)\n for i in range(1, sheet.nrows):\n rowvalue_list = [(str(cell.value).strip() if cell.value else None) for\n cell in sheet.row(i)]\n writer.writerow(rowvalue_list)\n", "<import token>\nxlsfile = glob.glob(os.path.join(os.path.dirname(__file__),\n 'storage/robot*.xls'))[0]\nwb = open_workbook(xlsfile)\nsheet = wb.sheet_by_name('robot_list')\nwith open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'),\n 'w') as file:\n writer = csv.writer(file, delimiter=',')\n headers = [cell.value for cell in sheet.row(0)]\n writer.writerow(headers)\n for i in range(1, sheet.nrows):\n rowvalue_list = [(str(cell.value).strip() if cell.value else None) for\n cell in sheet.row(i)]\n writer.writerow(rowvalue_list)\n", "<import token>\n<assignment token>\nwith open(os.path.join(os.path.dirname(__file__), 'storage/robot_list.csv'),\n 'w') as file:\n writer = csv.writer(file, delimiter=',')\n headers = [cell.value for cell in sheet.row(0)]\n writer.writerow(headers)\n for i in range(1, sheet.nrows):\n rowvalue_list = [(str(cell.value).strip() if cell.value else None) for\n cell in sheet.row(i)]\n writer.writerow(rowvalue_list)\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
376
7a59c8c883a9aaa723175783e01aa62e23503fde
#!/C:\Program Files (x86)\Python35-32 #importar librarias necesarias from urllib.request import urlopen from bs4 import BeautifulSoup
[ "#!/C:\\Program Files (x86)\\Python35-32\n\n#importar librarias necesarias\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n", "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n", "<import token>\n" ]
false
377
48d0bfdc607a4605ef82f5c7dc7fd6fc85c4255f
''' BMI=weight*0.45259227/(hei*0.0254)** ''' wht=2 if wht==0: print("wht is",wht) else: print("whtsdsb") #今天也完成了100波比跳 wei=float(input("wei=")) hei=float(input("hei=")) bmi=(wei*0.45259227)/((hei*0.0254)**2) print("BMI=",bmi) if bmi<18.5: print("too light") elif bmi<25: print("normal") elif bmi<30: print("over") else: print("wanghantangshidssb")
[ "\n'''\nBMI=weight*0.45259227/(hei*0.0254)**\n'''\nwht=2\nif wht==0:\n print(\"wht is\",wht)\nelse:\n print(\"whtsdsb\")\n#今天也完成了100波比跳\nwei=float(input(\"wei=\"))\nhei=float(input(\"hei=\"))\nbmi=(wei*0.45259227)/((hei*0.0254)**2)\nprint(\"BMI=\",bmi)\nif bmi<18.5:\n print(\"too light\")\nelif bmi<25:\n print(\"normal\")\nelif bmi<30:\n print(\"over\")\nelse:\n print(\"wanghantangshidssb\")\n", "<docstring token>\nwht = 2\nif wht == 0:\n print('wht is', wht)\nelse:\n print('whtsdsb')\nwei = float(input('wei='))\nhei = float(input('hei='))\nbmi = wei * 0.45259227 / (hei * 0.0254) ** 2\nprint('BMI=', bmi)\nif bmi < 18.5:\n print('too light')\nelif bmi < 25:\n print('normal')\nelif bmi < 30:\n print('over')\nelse:\n print('wanghantangshidssb')\n", "<docstring token>\n<assignment token>\nif wht == 0:\n print('wht is', wht)\nelse:\n print('whtsdsb')\n<assignment token>\nprint('BMI=', bmi)\nif bmi < 18.5:\n print('too light')\nelif bmi < 25:\n print('normal')\nelif bmi < 30:\n print('over')\nelse:\n print('wanghantangshidssb')\n", "<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
378
05e4bcc7323b908a7b45d766ada463ce172e25c4
import graphene import f1hub.drivers.schema import f1hub.results.schema import f1hub.constructors.schema import f1hub.races.schema import f1hub.status.schema import f1hub.circuits.schema import f1hub.constructorresults.schema import f1hub.constructorstandings.schema import f1hub.driverstandings.schema import f1hub.laptimes.schema import f1hub.pitstops.schema import f1hub.qualifying.schema import f1hub.seasons.schema class Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.schema.Query, f1hub.circuits.schema.Query,\ f1hub.constructorresults.schema.Query, f1hub.constructorstandings.schema.Query, f1hub.driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.pitstops.schema.Query,\ f1hub.qualifying.schema.Query, f1hub.seasons.schema.Query, graphene.ObjectType): pass schema = graphene.Schema(query=Query)
[ "import graphene\n\nimport f1hub.drivers.schema\nimport f1hub.results.schema\nimport f1hub.constructors.schema\nimport f1hub.races.schema\nimport f1hub.status.schema\nimport f1hub.circuits.schema\nimport f1hub.constructorresults.schema\nimport f1hub.constructorstandings.schema\nimport f1hub.driverstandings.schema\nimport f1hub.laptimes.schema\nimport f1hub.pitstops.schema\nimport f1hub.qualifying.schema\nimport f1hub.seasons.schema\n\n\nclass Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.schema.Query, f1hub.circuits.schema.Query,\\\n f1hub.constructorresults.schema.Query, f1hub.constructorstandings.schema.Query, f1hub.driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.pitstops.schema.Query,\\\n f1hub.qualifying.schema.Query, f1hub.seasons.schema.Query, graphene.ObjectType):\n pass\n\n\nschema = graphene.Schema(query=Query)\n", "import graphene\nimport f1hub.drivers.schema\nimport f1hub.results.schema\nimport f1hub.constructors.schema\nimport f1hub.races.schema\nimport f1hub.status.schema\nimport f1hub.circuits.schema\nimport f1hub.constructorresults.schema\nimport f1hub.constructorstandings.schema\nimport f1hub.driverstandings.schema\nimport f1hub.laptimes.schema\nimport f1hub.pitstops.schema\nimport f1hub.qualifying.schema\nimport f1hub.seasons.schema\n\n\nclass Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.\n constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.\n schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.\n schema.Query, f1hub.constructorstandings.schema.Query, f1hub.\n driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.\n pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.\n schema.Query, graphene.ObjectType):\n pass\n\n\nschema = graphene.Schema(query=Query)\n", "<import token>\n\n\nclass Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.\n constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.\n schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.\n schema.Query, f1hub.constructorstandings.schema.Query, f1hub.\n driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.\n pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.\n schema.Query, graphene.ObjectType):\n pass\n\n\nschema = graphene.Schema(query=Query)\n", "<import token>\n\n\nclass Query(f1hub.drivers.schema.Query, f1hub.results.schema.Query, f1hub.\n constructors.schema.Query, f1hub.races.schema.Query, f1hub.status.\n schema.Query, f1hub.circuits.schema.Query, f1hub.constructorresults.\n schema.Query, f1hub.constructorstandings.schema.Query, f1hub.\n driverstandings.schema.Query, f1hub.laptimes.schema.Query, f1hub.\n pitstops.schema.Query, f1hub.qualifying.schema.Query, f1hub.seasons.\n schema.Query, graphene.ObjectType):\n pass\n\n\n<assignment token>\n", "<import token>\n<class token>\n<assignment token>\n" ]
false
379
7530c2c85f83d1714840ba97c1ec702f063658c5
from typing import List import glm import pxng import OpenGL.GL as gl class VertexArrayObject: def __init__(self, primitive): self._primitive = primitive self._buffers: List[pxng.BufferObject] = [] self._indices = pxng.BufferObject(data_type=self.index_data_type, array_type=gl.GL_ELEMENT_ARRAY_BUFFER) self._vao = gl.glGenVertexArrays(1) def attach_buffer(self, vbo: pxng.BufferObject): self._buffers.append(vbo) return len(self._buffers) - 1 def add_quad(self, p1, p2, p3, p4): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._buffers[0].set_value(p4) self._indices.set_value(glm.u16vec3(i, i + 1, i + 3)) self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3)) def add_triangle(self, p1, p2, p3): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._indices.set_value(glm.u16vec3(i, i + 1, i + 2)) def add_line(self, p1, p2): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._indices.set_value(glm.u16vec2(i, i + 1)) def add_point(self, p1): i = self._buffers[0].index self._buffers[0].set_value(p1) self._indices.set_value(glm.u16vec1(i)) def set_colors(self, *args: glm.vec4, target=1): for c in args: self._buffers[target].set_value(c) def set_texture(self, *args: glm.vec2 or glm.uvec2, target=1): for c in args: self._buffers[target].set_value(c) def create(self): gl.glBindVertexArray(self._vao) for index, vbo in enumerate(self._buffers): vbo.bind(index) self._indices.bind(None) def reset(self): self._indices.reset() for vbo in self._buffers: vbo.reset() def draw(self): index_count = len(self._indices) * self.primitive_component_count gl.glDrawElements(self._primitive, index_count, gl.GL_UNSIGNED_SHORT, None) @property def index_data_type(self): if self._primitive == gl.GL_TRIANGLES: return glm.u16vec3 elif self._primitive == gl.GL_LINES: return glm.u16vec2 elif self._primitive == gl.GL_POINTS: return glm.u16vec1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') @property def primitive_component_count(self): if self._primitive == gl.GL_TRIANGLES: return 3 elif self._primitive == gl.GL_LINES: return 2 elif self._primitive == gl.GL_POINTS: return 1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') def bind(self): gl.glBindVertexArray(self._vao) if self._indices.bind(None): if any(vbo.changed for vbo in self._buffers): self.create() return True gl.glBindVertexArray(0) return False
[ "from typing import List\n\nimport glm\nimport pxng\n\nimport OpenGL.GL as gl\n\n\nclass VertexArrayObject:\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n\n def add_point(self, p1):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._indices.set_value(glm.u16vec1(i))\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: glm.vec2 or glm.uvec2, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n\n self._indices.bind(None)\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "from typing import List\nimport glm\nimport pxng\nimport OpenGL.GL as gl\n\n\nclass VertexArrayObject:\n\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n\n def add_point(self, p1):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._indices.set_value(glm.u16vec1(i))\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n self._indices.bind(None)\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.\n GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n\n def add_point(self, p1):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._indices.set_value(glm.u16vec1(i))\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n self._indices.bind(None)\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.\n GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n self._indices.bind(None)\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.\n GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n self._indices.bind(None)\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.\n GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n self._indices.bind(None)\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n <function token>\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n <function token>\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n <function token>\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n <function token>\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n <function token>\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n <function token>\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n <function token>\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n <function token>\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n <function token>\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n <function token>\n <function token>\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n <function token>\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n <function token>\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n <function token>\n <function token>\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n <function token>\n <function token>\n <function token>\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n <function token>\n <function token>\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n <function token>\n <function token>\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "<import token>\n\n\nclass VertexArrayObject:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n", "<import token>\n<class token>\n" ]
false
380
39fdb9c586c3cf92d493269ceac419e0058a763a
import pandas as pd import numpy as np import pyten.tenclass import pyten.method import pyten.tools def scalable(file_name=None, function_name=None, recover=None, omega=None, r=2, tol=1e-8, maxiter=100, init='random', printitn=0): """ Helios1 API returns CP_ALS, TUCKER_ALS, or NNCP decomposition or Recovery Result arg can be list, tuple, set, and array with numerical values. ----------- :param file_name: {Default: None} :param function_name: Tensor-based Method :param recover: Input '1' to recover other to decompose.{Default: None} :param omega: Index Tensor of Obseved Entries :param r: The rank of the Tensor you want to use for approximation (recover or decompose).{Default: 2} :param tol: Tolerance on difference in fit.(Convergence tolerance for both cp(als) or tucker(als).){Default: 1.0e-4} :param maxiter: Maximum number of iterations {Default: 50} :param init: Initial guess 'random'|'nvecs'|'eigs'. {Default 'random'} :param printitn: Print fit every n iterations; 0 for no printing. ----------- :return Ori: Original Tensor :return full: Full Tensor reconstructed by decomposed matrices :return Final: Decomposition Results e.g. Ttensor or Ktensor :return Rec: Recovered Tensor (Completed Tensor) ----------- """ # User Interface if file_name is None: file_name = raw_input("Please input the file_name of the data: \n") print("\n") if function_name is None: function_name = raw_input("Please choose the method you want to use to recover data(Input one number):\n" " 1. Distributed CP(ALS) 2.Distributed CP(ADMM) 3. DisTenC 0.Exit \n") print("\n") #if recover is None: # recover = raw_input("If there are missing values in the file? (Input one number)\n" # "1.Yes, recover it 2.No, just decompose (Missing entries in the original tensor will be replaced by 0) 0.Exit\n") # Use pandas package to load data ## if file_name[-3:] == 'csv': # dat1 = pd.read_csv(file_name, delimiter=';') # Data preprocessing # First: create Sptensor # dat = dat1.values # sha = dat.shape # subs = dat[:, range(sha[1] - 1)] # subs = subs - 1 # vals = dat[:, sha[1] - 1] # vals = vals.reshape(len(vals), 1) # siz = np.max(subs, 0) # siz = np.int32(siz + 1) # X1 = pyten.tenclass.Sptensor(subs, vals, siz) # Second: create Tensor object and find missing data # X = X1.totensor() # Ori = X.data # lstnan = np.isnan(X.data) # X.data = np.nan_to_num(X.data) # Construct omega #output = 1 # An output indicate flag. (Decompose: 1, Recover:2) Ori = None #if type(omega) != np.ndarray: # # if True in lstnan: # omega = X.data * 0 + 1 # omega[lstnan] = 0 # if recover == '1': # output = 2 # Choose method to recover or decompose if type(function_name) == str: if function_name == '1' or function_name == 'D_cp_als': Dals = pyten.method.TensorDecompositionALS() Dals.dir_data = file_name Dals.rank = r Dals.run() Dals.maxIter = maxiter Dals.tol = tol ###### Final = Dals.ktensor Rec = None full = Final.totensor() ###### elif function_name == '2' or function_name == 'D_ADMM': Dadmm = pyten.method.DistTensorADMM() Dadmm.dir_data = file_name Dadmm.rank = r Dadmm.run() Dadmm.maxIter = maxiter Dadmm.tol = tol ###### Final = Dadmm.ktensor Rec = None full = Final.totensor() ###### elif function_name == '3' or function_name == 'D_ADMM_C': DadmmC = pyten.method.DistTensorCompletionADMM() DadmmC.dir_data = file_name DadmmC.rank = r DadmmC.run() DadmmC.maxIter = maxiter DadmmC.tol = tol ###### Final = DadmmC.ktensor #Rec = Final.totensor().data * omega + X.data * (1 - omega) full = Final.totensor() Rec = full ###### elif function_name == '0': print 'Successfully Exit' return None, None, None, None else: raise ValueError('No Such Method') else: raise TypeError('No Such Method') # Output Result # [nv, nd] = subs.shape if function_name == 1 or function_name == 2: newsubs = full.tosptensor().subs tempvals = full.tosptensor().vals newfilename = file_name[:-4] + '_Decomposite' + file_name[-4:] #print "\n" + "The original Tensor is: " #print X1 print "\n" + "The Decomposed Result is: " print Final else: newsubs = Rec.tosptensor().subs tempvals = Rec.tosptensor().vals newfilename = file_name[:-4] + '_Recover' + file_name[-4:] #print "\n" + "The original Tensor is: " #print Ori print "\n" + "The Recovered Tensor is: " print Rec.data # Return result return Ori, full, Final, Rec
[ "import pandas as pd\nimport numpy as np\n\nimport pyten.tenclass\nimport pyten.method\nimport pyten.tools\n\n\ndef scalable(file_name=None, function_name=None, recover=None, omega=None, r=2, tol=1e-8, maxiter=100, init='random',\n printitn=0):\n \"\"\"\n Helios1 API returns CP_ALS, TUCKER_ALS, or NNCP decomposition or Recovery Result\n arg can be list, tuple, set, and array with numerical values.\n -----------\n :param file_name: {Default: None}\n :param function_name: Tensor-based Method\n :param recover: Input '1' to recover other to decompose.{Default: None}\n :param omega: Index Tensor of Obseved Entries\n :param r: The rank of the Tensor you want to use for approximation (recover or decompose).{Default: 2}\n :param tol: Tolerance on difference in fit.(Convergence tolerance for both cp(als) or tucker(als).){Default: 1.0e-4}\n :param maxiter: Maximum number of iterations {Default: 50}\n :param init: Initial guess 'random'|'nvecs'|'eigs'. {Default 'random'}\n :param printitn: Print fit every n iterations; 0 for no printing.\n -----------\n :return Ori: Original Tensor\n :return full: Full Tensor reconstructed by decomposed matrices\n :return Final: Decomposition Results e.g. Ttensor or Ktensor\n :return Rec: Recovered Tensor (Completed Tensor)\n -----------\n \"\"\"\n\n # User Interface\n if file_name is None:\n file_name = raw_input(\"Please input the file_name of the data: \\n\")\n print(\"\\n\")\n\n if function_name is None:\n function_name = raw_input(\"Please choose the method you want to use to recover data(Input one number):\\n\"\n \" 1. Distributed CP(ALS) 2.Distributed CP(ADMM) 3. DisTenC 0.Exit \\n\")\n print(\"\\n\")\n #if recover is None:\n # recover = raw_input(\"If there are missing values in the file? (Input one number)\\n\"\n # \"1.Yes, recover it 2.No, just decompose (Missing entries in the original tensor will be replaced by 0) 0.Exit\\n\")\n\n # Use pandas package to load data\n## if file_name[-3:] == 'csv':\n# dat1 = pd.read_csv(file_name, delimiter=';')\n\n # Data preprocessing\n # First: create Sptensor\n# dat = dat1.values\n# sha = dat.shape\n# subs = dat[:, range(sha[1] - 1)]\n# subs = subs - 1\n# vals = dat[:, sha[1] - 1]\n# vals = vals.reshape(len(vals), 1)\n# siz = np.max(subs, 0)\n# siz = np.int32(siz + 1)\n# X1 = pyten.tenclass.Sptensor(subs, vals, siz)\n\n # Second: create Tensor object and find missing data\n# X = X1.totensor()\n# Ori = X.data\n# lstnan = np.isnan(X.data)\n# X.data = np.nan_to_num(X.data)\n\n # Construct omega\n #output = 1 # An output indicate flag. (Decompose: 1, Recover:2)\n Ori = None\n #if type(omega) != np.ndarray:\n # # if True in lstnan:\n # omega = X.data * 0 + 1\n # omega[lstnan] = 0\n # if recover == '1':\n # output = 2\n\n # Choose method to recover or decompose\n if type(function_name) == str:\n if function_name == '1' or function_name == 'D_cp_als':\n Dals = pyten.method.TensorDecompositionALS()\n Dals.dir_data = file_name\n Dals.rank = r\n Dals.run()\n Dals.maxIter = maxiter\n Dals.tol = tol\n\n ######\n Final = Dals.ktensor\n Rec = None\n full = Final.totensor()\n ######\n\n elif function_name == '2' or function_name == 'D_ADMM':\n Dadmm = pyten.method.DistTensorADMM()\n Dadmm.dir_data = file_name\n Dadmm.rank = r\n Dadmm.run()\n Dadmm.maxIter = maxiter\n Dadmm.tol = tol\n\n ######\n Final = Dadmm.ktensor\n Rec = None\n full = Final.totensor()\n ######\n\n elif function_name == '3' or function_name == 'D_ADMM_C':\n DadmmC = pyten.method.DistTensorCompletionADMM()\n DadmmC.dir_data = file_name\n DadmmC.rank = r\n DadmmC.run()\n DadmmC.maxIter = maxiter\n DadmmC.tol = tol\n\n ######\n Final = DadmmC.ktensor\n #Rec = Final.totensor().data * omega + X.data * (1 - omega)\n full = Final.totensor()\n Rec = full\n ######\n\n elif function_name == '0':\n print 'Successfully Exit'\n return None, None, None, None\n else:\n raise ValueError('No Such Method')\n\n else:\n raise TypeError('No Such Method')\n\n # Output Result\n # [nv, nd] = subs.shape\n if function_name == 1 or function_name == 2:\n newsubs = full.tosptensor().subs\n tempvals = full.tosptensor().vals\n newfilename = file_name[:-4] + '_Decomposite' + file_name[-4:]\n #print \"\\n\" + \"The original Tensor is: \"\n #print X1\n print \"\\n\" + \"The Decomposed Result is: \"\n print Final\n else:\n newsubs = Rec.tosptensor().subs\n tempvals = Rec.tosptensor().vals\n newfilename = file_name[:-4] + '_Recover' + file_name[-4:]\n #print \"\\n\" + \"The original Tensor is: \"\n #print Ori\n print \"\\n\" + \"The Recovered Tensor is: \"\n print Rec.data\n\n # Return result\n return Ori, full, Final, Rec\n" ]
true
381
1f7d770106ea8e7d1c0bb90e1fc576b7ee2f0220
# Generated by Django 3.0.8 on 2020-08-28 17:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('shop', '0003_auto_20200828_1836'), ] operations = [ migrations.AddField( model_name='order', name='total', field=models.CharField(default=0, max_length=200), preserve_default=False, ), migrations.AlterField( model_name='order', name='items', field=models.CharField(max_length=300), ), ]
[ "# Generated by Django 3.0.8 on 2020-08-28 17:37\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0003_auto_20200828_1836'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='total',\n field=models.CharField(default=0, max_length=200),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='order',\n name='items',\n field=models.CharField(max_length=300),\n ),\n ]\n", "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0003_auto_20200828_1836')]\n operations = [migrations.AddField(model_name='order', name='total',\n field=models.CharField(default=0, max_length=200), preserve_default\n =False), migrations.AlterField(model_name='order', name='items',\n field=models.CharField(max_length=300))]\n", "<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0003_auto_20200828_1836')]\n operations = [migrations.AddField(model_name='order', name='total',\n field=models.CharField(default=0, max_length=200), preserve_default\n =False), migrations.AlterField(model_name='order', name='items',\n field=models.CharField(max_length=300))]\n", "<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n", "<import token>\n<class token>\n" ]
false
382
01847c9e601eae6775cd4324483740c30e344557
from django.apps import AppConfig class CfCoreConfig(AppConfig): name = 'cf_core'
[ "from django.apps import AppConfig\n\n\nclass CfCoreConfig(AppConfig):\n name = 'cf_core'\n", "<import token>\n\n\nclass CfCoreConfig(AppConfig):\n name = 'cf_core'\n", "<import token>\n\n\nclass CfCoreConfig(AppConfig):\n <assignment token>\n", "<import token>\n<class token>\n" ]
false
383
ac2d4372f8913ea9ae1066833cca09985e521f99
#!/usr/bin/env python """\ Simple g-code streaming script for grbl """ import serial import time import csv import json import RPi.GPIO as GPIO from multiprocessing import Process, Queue class motion(): def __init__(self): # Open grbl serial port #self.s = serial.Serial("/dev/ttyUSB0",baudrate=115200,xonxoff=True,timeout=1) self.s = serial.Serial("/dev/ttyUSB0", baudrate=115200, timeout=0.1, rtscts=True, xonxoff=False) self.rsp='' self.posx=0.0 self.posy=0.0 self.positions_file = '/home/pi/Work/Wall2.0/system/positions.csv' self.home_position_file = '/home/pi/Work/Wall2.0/system/home.csv' self.mode = 'delay' self.sensor_pin = 3 self.interval = 1 GPIO.setmode(GPIO.BOARD) # GPIO.setup(self.sensor_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP) GPIO.setup(self.sensor_pin, GPIO.IN) # Wake up grbl self.s.write("\r\n\r\n") time.sleep(2) # Wait for grbl to initialize self.s.flushInput() # Flush startup text in serial input self.feedrate = 100 self.update_feedrate(0) with open(self.positions_file,'w') as f: f.write('posx,posy\n') self.homex=None self.homey=None with open(self.home_position_file,'r') as f: lines = csv.DictReader(f) for l in lines: print 'x_home: '+l['homex'] print 'y_home: '+l['homey'] self.homex = float(l['homex']) self.homey = float(l['homey']) # set origin offset #self.send("g92 x0 y0") self.set_relative_position() self.pos_queue = Queue() self.serial_proc = Process(target=self.get_response, args=(self.pos_queue,)) self.serial_proc.start() def update_feedrate(self, feedrate): tmp = self.feedrate + feedrate if(tmp >= 100) and (tmp <= 800): self.feedrate = tmp # feedrate speed self.send("f"+str(self.feedrate)) def update_interval(self, interval): if(self.interval >= 1) and (self.interval <= 10): self.interval += interval def send(self, cmd): print 'Sending: ' + cmd self.s.write(cmd + '\n') # Send g-code block to grbl def move(self,sign_x, sign_y): x = "x"+str(sign_x*10) y = "y"+str(sign_y*10) #self.send("%") self.send(" ".join(["g1",x,y])) def move_to_position(self,x,y): x = "x"+str(x) y = "y"+str(y) self.send(" ".join(["g1",x,y])) def stop(self): self.send("!") self.send("%") if (self.homex!=None) and (self.homey!=None): time.sleep(0.5) self.set_absolute_position() self.update_current_position() self.move_to_position(self.homex,self.homey) self.set_relative_position() def disconnect(self): # Close file and serial port self.s.close() def get_response(self, q): while(1): tmp = self.s.readline() tmp = tmp.strip() if tmp is not '': try: tmp = json.loads(tmp) print tmp if 'r' in tmp.keys(): if 'sr' in tmp['r'].keys(): tmp = tmp['r'] if 'sr' in tmp.keys(): if 'posx' in tmp['sr'].keys(): self.posx=tmp['sr']['posx'] if 'posy' in tmp['sr'].keys(): self.posy=tmp['sr']['posy'] q.put((self.posx, self.posy)) print 'pos1: '+str((self.posx, self.posy)) except ValueError: print "get_response chocked" self.stop() time.sleep(1) else: time.sleep(.2) def record_current_position(self): self.send('{"sr":null}') print "Saving" # TODO: Check if serial_proc is running? self.update_current_position() with open(self.positions_file,'a') as f: f.write(str(self.posx)+','+str(self.posy)+'\n') def record_home_position(self): self.send('{"sr":null}') print "Saving home" # TODO: Check if serial_proc is running? self.update_current_position() self.homex = self.posx self.homey = self.posy with open(self.home_position_file,'w') as f: f.write('homex,homey\n') f.write(str(self.posx)+','+str(self.posy)+'\n') def delete_home_position(self): print "Deleting home" with open(self.home_position_file,'w') as f: f.write('homex,homey\n') self.homex = None self.homey = None def update_current_position(self): while not self.pos_queue.empty(): self.posx, self.posy = self.pos_queue.get() def getTrigger(self): return GPIO.input(self.sensor_pin) def changeMode(self): if self.mode == 'delay': self.mode = 'sensor' elif self.mode == 'sensor': self.mode = 'delay' def set_absolute_position(self): # absolute mode self.send("g90") def set_relative_position(self): # relative mode self.send("g91") def playback_saved_positions(self): self.set_absolute_position() self.update_current_position() with open(self.positions_file) as f: lines = csv.DictReader(f) for l in lines: print 'x_dst: '+l['posx']+' - '+str(self.posx) print 'y_dst: '+l['posy']+' - '+str(self.posy) x_dst = float(l['posx'])#-self.posx y_dst = float(l['posy'])#-self.posy x = ' x'+str((x_dst)) y = ' y'+str((y_dst)) print(x,y) self.send('g1'+x+y) while(1): self.update_current_position() if (self.posx != float(l['posx'])) or \ (self.posy != float(l['posy'])): time.sleep(.1) else: break if(self.mode == 'delay'): time.sleep(self.interval) elif(self.mode == 'sensor'): num_strikes = 0 while num_strikes < self.interval: while(not self.getTrigger()): time.sleep(.01) num_strikes += 1 # relative mode self.send("g91")
[ "#!/usr/bin/env python\n\"\"\"\\\nSimple g-code streaming script for grbl\n\"\"\"\n \nimport serial\nimport time\nimport csv\nimport json\nimport RPi.GPIO as GPIO\nfrom multiprocessing import Process, Queue\nclass motion():\n def __init__(self):\n # Open grbl serial port\n #self.s = serial.Serial(\"/dev/ttyUSB0\",baudrate=115200,xonxoff=True,timeout=1)\n self.s = serial.Serial(\"/dev/ttyUSB0\",\n baudrate=115200,\n timeout=0.1,\n rtscts=True,\n xonxoff=False)\n self.rsp=''\n self.posx=0.0\n self.posy=0.0\n self.positions_file = '/home/pi/Work/Wall2.0/system/positions.csv'\n self.home_position_file = '/home/pi/Work/Wall2.0/system/home.csv'\n self.mode = 'delay'\n self.sensor_pin = 3\n self.interval = 1\n GPIO.setmode(GPIO.BOARD)\n# GPIO.setup(self.sensor_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.setup(self.sensor_pin, GPIO.IN)\n\n # Wake up grbl\n self.s.write(\"\\r\\n\\r\\n\")\n time.sleep(2) # Wait for grbl to initialize\n self.s.flushInput() # Flush startup text in serial input \n\n self.feedrate = 100\n self.update_feedrate(0)\n\n with open(self.positions_file,'w') as f:\n f.write('posx,posy\\n')\n\n self.homex=None\n self.homey=None\n with open(self.home_position_file,'r') as f:\n lines = csv.DictReader(f)\n for l in lines:\n print 'x_home: '+l['homex']\n print 'y_home: '+l['homey']\n self.homex = float(l['homex'])\n self.homey = float(l['homey'])\n\n # set origin offset\n #self.send(\"g92 x0 y0\")\n\n self.set_relative_position()\n\n self.pos_queue = Queue()\n self.serial_proc = Process(target=self.get_response,\n args=(self.pos_queue,))\n\n self.serial_proc.start()\n\n def update_feedrate(self, feedrate):\n tmp = self.feedrate + feedrate\n if(tmp >= 100) and (tmp <= 800):\n self.feedrate = tmp\n # feedrate speed\n self.send(\"f\"+str(self.feedrate))\n\n def update_interval(self, interval):\n if(self.interval >= 1) and (self.interval <= 10):\n self.interval += interval\n \n def send(self, cmd): \n print 'Sending: ' + cmd\n self.s.write(cmd + '\\n') # Send g-code block to grbl\n\n def move(self,sign_x, sign_y):\n x = \"x\"+str(sign_x*10) \n y = \"y\"+str(sign_y*10) \n #self.send(\"%\")\n self.send(\" \".join([\"g1\",x,y]))\n\n def move_to_position(self,x,y):\n x = \"x\"+str(x) \n y = \"y\"+str(y) \n self.send(\" \".join([\"g1\",x,y]))\n\n def stop(self):\n self.send(\"!\")\n self.send(\"%\")\n if (self.homex!=None) and (self.homey!=None):\n time.sleep(0.5)\n self.set_absolute_position()\n self.update_current_position()\n self.move_to_position(self.homex,self.homey)\n self.set_relative_position()\n\n def disconnect(self):\n # Close file and serial port\n self.s.close()\n\n def get_response(self, q):\n while(1):\n tmp = self.s.readline()\n tmp = tmp.strip()\n if tmp is not '':\n try:\n tmp = json.loads(tmp)\n print tmp\n if 'r' in tmp.keys():\n if 'sr' in tmp['r'].keys():\n tmp = tmp['r']\n if 'sr' in tmp.keys():\n if 'posx' in tmp['sr'].keys():\n self.posx=tmp['sr']['posx']\n if 'posy' in tmp['sr'].keys():\n self.posy=tmp['sr']['posy']\n q.put((self.posx, self.posy))\n print 'pos1: '+str((self.posx, self.posy))\n except ValueError:\n print \"get_response chocked\"\n self.stop()\n time.sleep(1)\n else:\n time.sleep(.2)\n\n def record_current_position(self):\n self.send('{\"sr\":null}')\n print \"Saving\"\n # TODO: Check if serial_proc is running?\n self.update_current_position()\n with open(self.positions_file,'a') as f:\n f.write(str(self.posx)+','+str(self.posy)+'\\n')\n\n def record_home_position(self):\n self.send('{\"sr\":null}')\n print \"Saving home\"\n # TODO: Check if serial_proc is running?\n self.update_current_position()\n self.homex = self.posx\n self.homey = self.posy\n with open(self.home_position_file,'w') as f:\n f.write('homex,homey\\n')\n f.write(str(self.posx)+','+str(self.posy)+'\\n')\n\n def delete_home_position(self):\n print \"Deleting home\"\n with open(self.home_position_file,'w') as f:\n f.write('homex,homey\\n')\n self.homex = None\n self.homey = None\n\n def update_current_position(self):\n while not self.pos_queue.empty():\n self.posx, self.posy = self.pos_queue.get()\n\n def getTrigger(self):\n return GPIO.input(self.sensor_pin)\n\n def changeMode(self):\n if self.mode == 'delay':\n self.mode = 'sensor'\n elif self.mode == 'sensor':\n self.mode = 'delay'\n\n def set_absolute_position(self):\n # absolute mode \n self.send(\"g90\")\n\n def set_relative_position(self):\n # relative mode \n self.send(\"g91\")\n\n def playback_saved_positions(self):\n self.set_absolute_position()\n self.update_current_position()\n with open(self.positions_file) as f:\n lines = csv.DictReader(f)\n for l in lines:\n print 'x_dst: '+l['posx']+' - '+str(self.posx)\n print 'y_dst: '+l['posy']+' - '+str(self.posy)\n x_dst = float(l['posx'])#-self.posx\n y_dst = float(l['posy'])#-self.posy\n x = ' x'+str((x_dst))\n y = ' y'+str((y_dst))\n print(x,y)\n self.send('g1'+x+y)\n while(1):\n self.update_current_position()\n if (self.posx != float(l['posx'])) or \\\n (self.posy != float(l['posy'])):\n time.sleep(.1)\n else:\n break\n\n if(self.mode == 'delay'):\n time.sleep(self.interval)\n elif(self.mode == 'sensor'):\n num_strikes = 0\n while num_strikes < self.interval:\n while(not self.getTrigger()):\n time.sleep(.01)\n num_strikes += 1\n # relative mode \n self.send(\"g91\")\n" ]
true
384
ca11e9cf0bcfcbd714c45b5c95bd2c2044b65909
"""Woma objects for dealing with HTTP. Request and Response inherit from webob's Request and Response objects, so see http://docs.webob.org/en/latest/ for full documentation. The only things documented here are the customizations. """ from webob import Request as BaseRequest from webob import Response as BaseResponse class Client(object): """Make requests to a wsgi app and return the response.""" def __init__(self, app): self.app = app def request(self, path, method, body=None): path = path or '/' request = BaseRequest.blank(path) request.method = method request.text = body or '' return request.get_response(self.app) def get(self, path=None): return self.request(path, 'GET') def post(self, path=None, body=None): return self.request(path, 'POST', body) def put(self, path=None, body=None): return self.request(path, 'PUT', body) class Request(BaseRequest): """A webob.Request with additional properties.""" @property def kwargs(self): """Returns 'router.kwargs' from environ if present, or {} otherwise.""" return self.environ.get('router.kwargs', {}) class Response(BaseResponse): """A webob.Response that can be initialized with defaults from request.""" @classmethod def for_request(cls, request): """Initialize a Response with defaults based on the request. >>> request = Request({}) >>> request.headers['Content-Type'] = 'text/html; charset=latin1' >>> response = Response.for_request(request) >>> response.content_type 'text/html' >>> response.charset 'latin1' """ return cls( status_code=200, content_type=request.content_type or 'text/plain', charset=request.charset or 'UTF-8') def write(self, text): """An alias for `response.text = text`. >>> response = Response() >>> response.write('some text') >>> response.text 'some text' """ self.text = text
[ "\"\"\"Woma objects for dealing with HTTP.\n\nRequest and Response inherit from webob's Request and Response objects, so see\nhttp://docs.webob.org/en/latest/ for full documentation. The only things\ndocumented here are the customizations.\n\n\"\"\"\nfrom webob import Request as BaseRequest\nfrom webob import Response as BaseResponse\n\n\nclass Client(object):\n \"\"\"Make requests to a wsgi app and return the response.\"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(\n status_code=200,\n content_type=request.content_type or 'text/plain',\n charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\nfrom webob import Request as BaseRequest\nfrom webob import Response as BaseResponse\n\n\nclass Client(object):\n \"\"\"Make requests to a wsgi app and return the response.\"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n\n\nclass Client(object):\n \"\"\"Make requests to a wsgi app and return the response.\"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n\n\nclass Client(object):\n <docstring token>\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n\n\nclass Client(object):\n <docstring token>\n\n def __init__(self, app):\n self.app = app\n <function token>\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n\n\nclass Client(object):\n <docstring token>\n\n def __init__(self, app):\n self.app = app\n <function token>\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n <function token>\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n\n\nclass Client(object):\n <docstring token>\n <function token>\n <function token>\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n <function token>\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n\n\nclass Client(object):\n <docstring token>\n <function token>\n <function token>\n\n def get(self, path=None):\n return self.request(path, 'GET')\n <function token>\n <function token>\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n\n\nclass Client(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n<class token>\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n<class token>\n\n\nclass Request(BaseRequest):\n <docstring token>\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n<class token>\n\n\nclass Request(BaseRequest):\n <docstring token>\n <function token>\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass Response(BaseResponse):\n <docstring token>\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass Response(BaseResponse):\n <docstring token>\n <function token>\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n", "<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass Response(BaseResponse):\n <docstring token>\n <function token>\n <function token>\n", "<docstring token>\n<import token>\n<class token>\n<class token>\n<class token>\n" ]
false
385
2c22f891f30825bcb97987c78a98988ad2a92210
import os import sys import json import logging import argparse from glob import glob from pricewatcher.tools import ensure_mkdir from pricewatcher.parser.f21 import ForeverParser from pricewatcher.parser.jcrew import JcrewParser from pricewatcher.utils.load_es import bulk_load_es BRAND_PARSERS={ 'forever21': ForeverParser, 'jcrew': JcrewParser } # Set up logging FORMAT = '[%(asctime)s][%(levelname)s] %(message)s' logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S') logging.getLogger().setLevel(logging.INFO) def date_handler(obj): return obj.isoformat() if hasattr(obj, 'isoformat') else obj def run(): parser = argparse.ArgumentParser(description='Process some integers.') parser.add_argument('--input-base', required=True, help='') parser.add_argument('--output-base', default='parsed_pages', help='') parser.add_argument('--datetime', required=True, help='YYYYMMDD') parser.add_argument('--hour', default='*', help='HH') parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(), help='') parser.add_argument('--load-es', action='store_true') parser.add_argument('--es-host', default='localhost', help='default to localhost') parser.add_argument('--es-port', default='9200', help='default to 9200') parser.add_argument('--es-cleanup', action='store_true', help='remove index before loading new data') args = parser.parse_args() # Argument parsing dt_str = args.datetime hour_str = args.hour brand_str = args.brand input_base = args.input_base output_base = args.output_base # ES arguments es_host, es_port = args.es_host, args.es_port load_es = args.load_es # Parsing Raw Pages input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str, '*', '*', '*')) for file_path in input_files: dt_str, hour_str, br, category, sub_category, filename = file_path.split('/')[-6:] parser = BRAND_PARSERS[brand_str](file_path) parsed_docs = parser.parse() if parsed_docs: doc_list, price_list = parsed_docs logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list), file_path)) if not load_es: # Output Result output_dir = os.path.join(output_base, os.path.join(dt_str, hour_str, br, category)) ensure_mkdir(output_dir) output_path = os.path.join(output_dir, filename + '.json') logging.info('[WRITE] output to %s' % output_path) # Dump Product List with open(output_path + '.doc', 'w') as ofile: ofile.write(json.dumps(doc_list, default=date_handler)) with open(output_path + '.price', 'w') as ofile: ofile.write(json.dumps(price_list, default=date_handler)) else: #es_index, es_doctype = br, category logging.info('[LOAD ES] loading to ElasticSearch...') preprocessed_list = [] for doc in doc_list: preprocessed_list.append({ "index" : { "_index" : br, "_type" : category, "_id" : doc['product_id'] } }) preprocessed_list.append(doc) bulk_load_es(es_host, es_port, br, category, preprocessed_list, opt_dict=None) bulk_load_es(es_host, es_port, br, 'price', price_list)
[ "import os\nimport sys\nimport json\nimport logging\nimport argparse\nfrom glob import glob\n\nfrom pricewatcher.tools import ensure_mkdir\nfrom pricewatcher.parser.f21 import ForeverParser\nfrom pricewatcher.parser.jcrew import JcrewParser\nfrom pricewatcher.utils.load_es import bulk_load_es\n\nBRAND_PARSERS={\n'forever21': ForeverParser, \n'jcrew': JcrewParser\n}\n\n# Set up logging\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help='default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200') \n parser.add_argument('--es-cleanup', action='store_true', help='remove index before loading new data')\n args = parser.parse_args()\n\n # Argument parsing\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n\n # ES arguments\n es_host, es_port = args.es_host, args.es_port \n load_es = args.load_es\n\n # Parsing Raw Pages\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str, '*', '*', '*')) \n for file_path in input_files: \n dt_str, hour_str, br, category, sub_category, filename = file_path.split('/')[-6:] \n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list), file_path))\n if not load_es: \n # Output Result \n output_dir = os.path.join(output_base, os.path.join(dt_str, hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json') \n logging.info('[WRITE] output to %s' % output_path)\n # Dump Product List\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n #es_index, es_doctype = br, category \n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({ \"index\" : { \"_index\" : br, \"_type\" : category, \"_id\" : doc['product_id'] } })\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list, opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n\n", "import os\nimport sys\nimport json\nimport logging\nimport argparse\nfrom glob import glob\nfrom pricewatcher.tools import ensure_mkdir\nfrom pricewatcher.parser.f21 import ForeverParser\nfrom pricewatcher.parser.jcrew import JcrewParser\nfrom pricewatcher.utils.load_es import bulk_load_es\nBRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n", "<import token>\nBRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n", "<import token>\n<assignment token>\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n", "<import token>\n<assignment token>\n<code token>\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n", "<import token>\n<assignment token>\n<code token>\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\n<function token>\n", "<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n" ]
false
386
3cd7abf9659fe1db0ef3aa58df8dd7fd959e10a6
import os import csv import re totWords = 0 wordLen = 0 totSentWithPunctuation = 0 sourceFile = os.path.join('Resources', 'paragraph_2.txt') with open(sourceFile, 'r') as paragraph: paragraph = paragraph.read().split("\n\n") for sentence in paragraph: # Remove punctuation from sentences sentWithPunctuation = sentence sentNoPunctuation = re.sub(r'[^\w\s]','',sentence) #Split sentence with no punctuation by words using spaces words = sentNoPunctuation.split(" ") for word in words: wordLen = wordLen + len(word) # Compute totals for output message totWords = totWords + len(words) # Total words for all sentences avgSentLen_Words = round(totWords / len(paragraph),2) # Average words for all sentences avgLetterCount = round(wordLen/totWords,2) # Average letter by word for all sentences totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation) avgSentLen_chars = round(totSentWithPunctuation / len(paragraph),2) #Validate output by printing a test line # print(f"words: {len(words)} S w Punct. len: {len(sentWithPunctuation)} Sentence: {sentWithPunctuation}") print(f"\n\nParagraph Analysis of '{sourceFile}' file") print(f"---------------------------------------------------------") print(f" Approximate Word Count: {totWords} ") print(f" Approximate Sentence Count: {len(paragraph)} ") print(f" Average Letter Count: {avgLetterCount} ") print(f" Average Sentence Length (words): {avgSentLen_Words} ") print(f" Average Sentence Length (chars): {avgSentLen_chars} ")
[ "import os\nimport csv\nimport re\n\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\n\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\n\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split(\"\\n\\n\")\n\n\nfor sentence in paragraph:\n # Remove punctuation from sentences\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub(r'[^\\w\\s]','',sentence)\n\n #Split sentence with no punctuation by words using spaces\n words = sentNoPunctuation.split(\" \")\n for word in words:\n wordLen = wordLen + len(word)\n\n # Compute totals for output message \n totWords = totWords + len(words) # Total words for all sentences\n avgSentLen_Words = round(totWords / len(paragraph),2) # Average words for all sentences\n avgLetterCount = round(wordLen/totWords,2) # Average letter by word for all sentences\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph),2)\n\n #Validate output by printing a test line\n # print(f\"words: {len(words)} S w Punct. len: {len(sentWithPunctuation)} Sentence: {sentWithPunctuation}\")\n\nprint(f\"\\n\\nParagraph Analysis of '{sourceFile}' file\")\nprint(f\"---------------------------------------------------------\")\nprint(f\" Approximate Word Count: {totWords} \")\nprint(f\" Approximate Sentence Count: {len(paragraph)} \")\nprint(f\" Average Letter Count: {avgLetterCount} \")\nprint(f\" Average Sentence Length (words): {avgSentLen_Words} \")\nprint(f\" Average Sentence Length (chars): {avgSentLen_chars} \")\n\n\n\n\n\n", "import os\nimport csv\nimport re\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n", "<import token>\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n", "<import token>\n<assignment token>\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
387
b95619f3f52ff3747e38ecc153123962d0122a4d
# noinspection PyStatementEffect { 'name': 'ldap_user', 'summary': '', 'description': '域账号用户管理,登录及查询用户信息', 'author': '', 'website': '', 'source': {'git': 'https://github.com/LeiQiao/Parasite-Plugins.git', 'branch': 'master'}, 'category': '', 'version': '0.1', 'api': { '/user/token': 'user_api.gen_token', '/user/captcha': 'user_api.gen_captcha', '/user/login': { 'POST': 'user_api.login' }, '/user/search': 'user_api.search_users' }, # any plugin necessary for this one to work correctly 'depends': ['base', 'base_api_wrapper', 'redis_client', 'i18n'] }
[ "# noinspection PyStatementEffect\n{\n 'name': 'ldap_user',\n 'summary': '',\n 'description': '域账号用户管理,登录及查询用户信息',\n 'author': '',\n 'website': '',\n 'source': {'git': 'https://github.com/LeiQiao/Parasite-Plugins.git', 'branch': 'master'},\n\n 'category': '',\n 'version': '0.1',\n\n 'api': {\n '/user/token': 'user_api.gen_token',\n '/user/captcha': 'user_api.gen_captcha',\n '/user/login': {\n 'POST': 'user_api.login'\n },\n '/user/search': 'user_api.search_users'\n },\n\n # any plugin necessary for this one to work correctly\n 'depends': ['base', 'base_api_wrapper', 'redis_client', 'i18n']\n}\n", "{'name': 'ldap_user', 'summary': '', 'description': '域账号用户管理,登录及查询用户信息',\n 'author': '', 'website': '', 'source': {'git':\n 'https://github.com/LeiQiao/Parasite-Plugins.git', 'branch': 'master'},\n 'category': '', 'version': '0.1', 'api': {'/user/token':\n 'user_api.gen_token', '/user/captcha': 'user_api.gen_captcha',\n '/user/login': {'POST': 'user_api.login'}, '/user/search':\n 'user_api.search_users'}, 'depends': ['base', 'base_api_wrapper',\n 'redis_client', 'i18n']}\n", "<code token>\n" ]
false
388
c3527363cfc29ab7d598fe232d784b05ec2ef069
import models import json import reports.models import common.ot_utils def analyze_raw_reports(clean=True): if clean: delete_all_reports() COUNT = 100 offset = 0 while True: cont = analyze_raw_reports_subset(offset,COUNT) offset += COUNT if not cont: return def analyze_raw_reports_subset(offset,count): items = _collect_items(offset,count) if items: dump_items(items) return True return False def dump_items(items): wifis = [] locs = [] for (idx,item) in enumerate(items): if idx % 100 == 0: print '%d/%d' % (idx,len(items)) if 'wifi' in item.keys(): report_dt = common.ot_utils.get_utc_time_from_timestamp(float(item['time'])/1000) m = models.Report(device_id=item['device_id'],timestamp=report_dt) m.save() item_loc = item.get('location_api') if item_loc: loc = models.LocationInfo(report=m, lat=item_loc['lat'], lon=item_loc['long'], provider=item_loc['provider'], timestamp = common.ot_utils.get_utc_time_from_timestamp(float(item_loc['time'])/1000), accuracy = item_loc['accuracy']) locs.append(loc) for wifi in item['wifi']: wifis.append(models.SingleWifiReport(SSID=wifi['SSID'], signal=wifi['signal'], frequency=wifi['frequency'], key=wifi['key'], report=m)) print 'Saving all dependant objects' models.SingleWifiReport.objects.bulk_create(wifis) models.LocationInfo.objects.bulk_create(locs) def delete_all_reports(): common.ot_utils.delete_from_model(models.SingleWifiReport) common.ot_utils.delete_from_model(models.LocationInfo) common.ot_utils.delete_from_model(models.Report) def _collect_items(offset,count): all_reports_count = reports.models.RawReport.objects.count() print '*** offset = %d count = %d all_reports_count = %d' % (offset,count,all_reports_count) all_reports = reports.models.RawReport.objects.all()[offset:offset+count] result = [] for rj in all_reports: items = json.loads(rj.text)['items'] result.extend(items) return result
[ "import models\nimport json\nimport reports.models\nimport common.ot_utils\n\ndef analyze_raw_reports(clean=True):\n if clean:\n delete_all_reports()\n COUNT = 100\n offset = 0\n while True:\n cont = analyze_raw_reports_subset(offset,COUNT)\n offset += COUNT\n if not cont:\n return \n \ndef analyze_raw_reports_subset(offset,count):\n items = _collect_items(offset,count)\n if items:\n dump_items(items)\n return True\n return False\n\ndef dump_items(items):\n wifis = []\n locs = []\n for (idx,item) in enumerate(items):\n if idx % 100 == 0:\n print '%d/%d' % (idx,len(items))\n if 'wifi' in item.keys():\n report_dt = common.ot_utils.get_utc_time_from_timestamp(float(item['time'])/1000)\n m = models.Report(device_id=item['device_id'],timestamp=report_dt)\n m.save()\n item_loc = item.get('location_api')\n if item_loc:\n loc = models.LocationInfo(report=m,\n lat=item_loc['lat'],\n lon=item_loc['long'],\n provider=item_loc['provider'],\n timestamp = common.ot_utils.get_utc_time_from_timestamp(float(item_loc['time'])/1000),\n accuracy = item_loc['accuracy'])\n locs.append(loc)\n for wifi in item['wifi']:\n wifis.append(models.SingleWifiReport(SSID=wifi['SSID'],\n signal=wifi['signal'],\n frequency=wifi['frequency'],\n key=wifi['key'],\n report=m))\n print 'Saving all dependant objects'\n models.SingleWifiReport.objects.bulk_create(wifis)\n models.LocationInfo.objects.bulk_create(locs)\n \n \n\n\ndef delete_all_reports():\n common.ot_utils.delete_from_model(models.SingleWifiReport)\n common.ot_utils.delete_from_model(models.LocationInfo)\n common.ot_utils.delete_from_model(models.Report)\n \ndef _collect_items(offset,count):\n all_reports_count = reports.models.RawReport.objects.count()\n print '*** offset = %d count = %d all_reports_count = %d' % (offset,count,all_reports_count)\n all_reports = reports.models.RawReport.objects.all()[offset:offset+count]\n result = []\n for rj in all_reports:\n items = json.loads(rj.text)['items']\n result.extend(items)\n return result\n\n\n \n \n\n\n\n\n\n\n \n \n" ]
true
389
484d104a8481a707a187d0bcb30898c3459a88be
# -*- coding: utf-8 -*- from django.conf.urls import patterns, include, url from apps.virt.views import node, domain,device,cluster,home urlpatterns = patterns('', # Home url(r'^$', home.HomeView.as_view(), name='home'), # Cluster url(r'^cluster/status/$', cluster.ClusterStatusView.as_view(), name='cluster_status'), # Node url(r'^node/list/$', node.NodeListView.as_view(), name='node_list'), url(r'^node/add/$', node.NodeCreateView.as_view(), name='node_add'), url(r'^node/(?P<pk>\d+)/libvirt/$', node.NodeLibvirtView.as_view(), name='node_libvirt'), url(r'^node/(?P<pk>\d+)/libvirt/update/$', node.UpdateCapabilitiesView.as_view(), name='node_libvirt_update'), url(r'^node/(?P<pk>\d+)/libvirt/update/domains/$', node.UpdateDomainsView.as_view(), name='node_libvirt_updatedomains'), url(r'^node/(?P<pk>\d+)/libvirt/create/domains/$', node.CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'), url(r'^node/(?P<pk>\d+)/edit/$', node.NodeUpdateView.as_view(), name='node_edit'), url(r'^node/(?P<pk>\d+)/delete/$', node.NodeDeleteView.as_view(), name='node_delete'), # Domain url(r'^domain/list/$', domain.DomainListView.as_view(), name='domain_list'), url(r'^domain/add/$', domain.DomainCreateView.as_view(), name='domain_add'), url(r'^domain/(?P<pk>\d+)/libvirt/$', domain.DomainLibvirtView.as_view(), name='domain_libvirt'), url(r'^domain/(?P<pk>\d+)/edit/$', domain.DomainUpdateView.as_view(), name='domain_edit'), url(r'^domain/(?P<pk>\d+)/delete/$', domain.DomainDeleteView.as_view(), name='domain_delete'), url(r'^domain/(?P<pk>\d+)/libvirt/create/$', domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'), url(r'^domain/(?P<pk>\d+)/libvirt/reboot/$', domain.LibvirtRebootView.as_view(), name='domain_libvirt_reboot'), url(r'^domain/(?P<pk>\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.as_view(), name='domain_libvirt_shutdown'), url(r'^domain/(?P<pk>\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.as_view(), name='domain_libvirt_destroy'), url(r'^domain/(?P<pk>\d+)/libvirt/migrate/(?P<node_pk>\d+)/$', domain.LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'), url(r'^domain/(?P<pk>\d+)/libvirt/resume/$', domain.LibvirtResumeView.as_view(), name='domain_libvirt_resume'), url(r'^domain/(?P<pk>\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.as_view(), name='domain_libvirt_suspend'), # Device url(r'^domain/(?P<pk>\d+)/device/(?P<type>\w+)/add/$', device.DeviceCreateView.as_view(), name="device_add"), url(r'^device/(?P<pk>\d+)/$', device.DeviceUpdateView.as_view(), name="device_edit"), url(r'^device/(?P<pk>\d+)/attach/$', device.DeviceAttachView.as_view(), name="device_attach"), url(r'^device/(?P<pk>\d+)/detach/$', device.DeviceDetachView.as_view(), name="device_detach"), url(r'^device/(?P<pk>\d+)/delete/$', device.DeviceDeleteView.as_view(), name="device_delete") )
[ "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, include, url\nfrom apps.virt.views import node, domain,device,cluster,home\n\nurlpatterns = patterns('',\n\n # Home \n url(r'^$', home.HomeView.as_view(), name='home'),\n\n # Cluster \n url(r'^cluster/status/$', cluster.ClusterStatusView.as_view(), name='cluster_status'),\n\n # Node\n url(r'^node/list/$', node.NodeListView.as_view(), name='node_list'),\n url(r'^node/add/$', node.NodeCreateView.as_view(), name='node_add'),\n url(r'^node/(?P<pk>\\d+)/libvirt/$', node.NodeLibvirtView.as_view(), name='node_libvirt'),\n url(r'^node/(?P<pk>\\d+)/libvirt/update/$', node.UpdateCapabilitiesView.as_view(), name='node_libvirt_update'),\n url(r'^node/(?P<pk>\\d+)/libvirt/update/domains/$', node.UpdateDomainsView.as_view(), name='node_libvirt_updatedomains'),\n url(r'^node/(?P<pk>\\d+)/libvirt/create/domains/$', node.CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'),\n url(r'^node/(?P<pk>\\d+)/edit/$', node.NodeUpdateView.as_view(), name='node_edit'),\n url(r'^node/(?P<pk>\\d+)/delete/$', node.NodeDeleteView.as_view(), name='node_delete'),\n\n\n # Domain\n url(r'^domain/list/$', domain.DomainListView.as_view(), name='domain_list'),\n url(r'^domain/add/$', domain.DomainCreateView.as_view(), name='domain_add'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/$', domain.DomainLibvirtView.as_view(), name='domain_libvirt'),\n url(r'^domain/(?P<pk>\\d+)/edit/$', domain.DomainUpdateView.as_view(), name='domain_edit'),\n url(r'^domain/(?P<pk>\\d+)/delete/$', domain.DomainDeleteView.as_view(), name='domain_delete'),\n \n url(r'^domain/(?P<pk>\\d+)/libvirt/create/$', domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/reboot/$', domain.LibvirtRebootView.as_view(), name='domain_libvirt_reboot'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.as_view(), name='domain_libvirt_shutdown'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.as_view(), name='domain_libvirt_destroy'),\n\n url(r'^domain/(?P<pk>\\d+)/libvirt/migrate/(?P<node_pk>\\d+)/$', domain.LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/resume/$', domain.LibvirtResumeView.as_view(), name='domain_libvirt_resume'),\n url(r'^domain/(?P<pk>\\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.as_view(), name='domain_libvirt_suspend'),\n\n\n # Device\n url(r'^domain/(?P<pk>\\d+)/device/(?P<type>\\w+)/add/$', device.DeviceCreateView.as_view(), name=\"device_add\"),\n url(r'^device/(?P<pk>\\d+)/$', device.DeviceUpdateView.as_view(), name=\"device_edit\"),\n url(r'^device/(?P<pk>\\d+)/attach/$', device.DeviceAttachView.as_view(), name=\"device_attach\"),\n url(r'^device/(?P<pk>\\d+)/detach/$', device.DeviceDetachView.as_view(), name=\"device_detach\"),\n url(r'^device/(?P<pk>\\d+)/delete/$', device.DeviceDeleteView.as_view(), name=\"device_delete\")\n\n\n)\n\n", "from django.conf.urls import patterns, include, url\nfrom apps.virt.views import node, domain, device, cluster, home\nurlpatterns = patterns('', url('^$', home.HomeView.as_view(), name='home'),\n url('^cluster/status/$', cluster.ClusterStatusView.as_view(), name=\n 'cluster_status'), url('^node/list/$', node.NodeListView.as_view(),\n name='node_list'), url('^node/add/$', node.NodeCreateView.as_view(),\n name='node_add'), url('^node/(?P<pk>\\\\d+)/libvirt/$', node.\n NodeLibvirtView.as_view(), name='node_libvirt'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/update/$', node.UpdateCapabilitiesView.\n as_view(), name='node_libvirt_update'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/update/domains/$', node.UpdateDomainsView.\n as_view(), name='node_libvirt_updatedomains'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/create/domains/$', node.\n CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'), url\n ('^node/(?P<pk>\\\\d+)/edit/$', node.NodeUpdateView.as_view(), name=\n 'node_edit'), url('^node/(?P<pk>\\\\d+)/delete/$', node.NodeDeleteView.\n as_view(), name='node_delete'), url('^domain/list/$', domain.\n DomainListView.as_view(), name='domain_list'), url('^domain/add/$',\n domain.DomainCreateView.as_view(), name='domain_add'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/$', domain.DomainLibvirtView.as_view(),\n name='domain_libvirt'), url('^domain/(?P<pk>\\\\d+)/edit/$', domain.\n DomainUpdateView.as_view(), name='domain_edit'), url(\n '^domain/(?P<pk>\\\\d+)/delete/$', domain.DomainDeleteView.as_view(),\n name='domain_delete'), url('^domain/(?P<pk>\\\\d+)/libvirt/create/$',\n domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/reboot/$', domain.LibvirtRebootView.\n as_view(), name='domain_libvirt_reboot'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.\n as_view(), name='domain_libvirt_shutdown'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.\n as_view(), name='domain_libvirt_destroy'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/migrate/(?P<node_pk>\\\\d+)/$', domain.\n LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/resume/$', domain.LibvirtResumeView.\n as_view(), name='domain_libvirt_resume'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.\n as_view(), name='domain_libvirt_suspend'), url(\n '^domain/(?P<pk>\\\\d+)/device/(?P<type>\\\\w+)/add/$', device.\n DeviceCreateView.as_view(), name='device_add'), url(\n '^device/(?P<pk>\\\\d+)/$', device.DeviceUpdateView.as_view(), name=\n 'device_edit'), url('^device/(?P<pk>\\\\d+)/attach/$', device.\n DeviceAttachView.as_view(), name='device_attach'), url(\n '^device/(?P<pk>\\\\d+)/detach/$', device.DeviceDetachView.as_view(),\n name='device_detach'), url('^device/(?P<pk>\\\\d+)/delete/$', device.\n DeviceDeleteView.as_view(), name='device_delete'))\n", "<import token>\nurlpatterns = patterns('', url('^$', home.HomeView.as_view(), name='home'),\n url('^cluster/status/$', cluster.ClusterStatusView.as_view(), name=\n 'cluster_status'), url('^node/list/$', node.NodeListView.as_view(),\n name='node_list'), url('^node/add/$', node.NodeCreateView.as_view(),\n name='node_add'), url('^node/(?P<pk>\\\\d+)/libvirt/$', node.\n NodeLibvirtView.as_view(), name='node_libvirt'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/update/$', node.UpdateCapabilitiesView.\n as_view(), name='node_libvirt_update'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/update/domains/$', node.UpdateDomainsView.\n as_view(), name='node_libvirt_updatedomains'), url(\n '^node/(?P<pk>\\\\d+)/libvirt/create/domains/$', node.\n CreateALLDomainsView.as_view(), name='node_libvirt_createdomains'), url\n ('^node/(?P<pk>\\\\d+)/edit/$', node.NodeUpdateView.as_view(), name=\n 'node_edit'), url('^node/(?P<pk>\\\\d+)/delete/$', node.NodeDeleteView.\n as_view(), name='node_delete'), url('^domain/list/$', domain.\n DomainListView.as_view(), name='domain_list'), url('^domain/add/$',\n domain.DomainCreateView.as_view(), name='domain_add'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/$', domain.DomainLibvirtView.as_view(),\n name='domain_libvirt'), url('^domain/(?P<pk>\\\\d+)/edit/$', domain.\n DomainUpdateView.as_view(), name='domain_edit'), url(\n '^domain/(?P<pk>\\\\d+)/delete/$', domain.DomainDeleteView.as_view(),\n name='domain_delete'), url('^domain/(?P<pk>\\\\d+)/libvirt/create/$',\n domain.LibvirtCreateView.as_view(), name='domain_libvirt_create'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/reboot/$', domain.LibvirtRebootView.\n as_view(), name='domain_libvirt_reboot'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/shutdown/$', domain.LibvirtShutdownView.\n as_view(), name='domain_libvirt_shutdown'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/destroy/$', domain.LibvirtDestroyView.\n as_view(), name='domain_libvirt_destroy'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/migrate/(?P<node_pk>\\\\d+)/$', domain.\n LibvirtMigrateView.as_view(), name='domain_libvirt_migrate'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/resume/$', domain.LibvirtResumeView.\n as_view(), name='domain_libvirt_resume'), url(\n '^domain/(?P<pk>\\\\d+)/libvirt/suspend/$', domain.LibvirtSuspendView.\n as_view(), name='domain_libvirt_suspend'), url(\n '^domain/(?P<pk>\\\\d+)/device/(?P<type>\\\\w+)/add/$', device.\n DeviceCreateView.as_view(), name='device_add'), url(\n '^device/(?P<pk>\\\\d+)/$', device.DeviceUpdateView.as_view(), name=\n 'device_edit'), url('^device/(?P<pk>\\\\d+)/attach/$', device.\n DeviceAttachView.as_view(), name='device_attach'), url(\n '^device/(?P<pk>\\\\d+)/detach/$', device.DeviceDetachView.as_view(),\n name='device_detach'), url('^device/(?P<pk>\\\\d+)/delete/$', device.\n DeviceDeleteView.as_view(), name='device_delete'))\n", "<import token>\n<assignment token>\n" ]
false
390
545794cf4f0b2ab63b6a90951a78f8bdaca3c9e6
from collections import defaultdict as dd def grouping(w): d = dd(list) for k,v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,key=str.casefold)): d[k].append(v) return dict(sorted(d.items()))
[ "\nfrom collections import defaultdict as dd\ndef grouping(w):\n d = dd(list)\n for k,v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,key=str.casefold)):\n d[k].append(v)\n return dict(sorted(d.items()))\n\n", "from collections import defaultdict as dd\n\n\ndef grouping(w):\n d = dd(list)\n for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,\n key=str.casefold)):\n d[k].append(v)\n return dict(sorted(d.items()))\n", "<import token>\n\n\ndef grouping(w):\n d = dd(list)\n for k, v in ((len([y for y in x if y.isupper()]), x) for x in sorted(w,\n key=str.casefold)):\n d[k].append(v)\n return dict(sorted(d.items()))\n", "<import token>\n<function token>\n" ]
false
391
ab844143ceddf32982682f5092762af0c97db577
from ..translators.translator import Translator
[ "from ..translators.translator import Translator\n", "<import token>\n" ]
false
392
0762c5bec2d796bb7888e3de45e29fb20f88f491
from starter2 import * from collections import defaultdict import scipy import colors import hair_dryer reload(hair_dryer) import three_loopers_u500 as TL import movie_frames def GE_pearson(this_looper,core_list=None): if core_list is None: core_list = np.unique(this_looper.tr.core_ids) name = this_looper.sim_name thtr=this_looper.tr mask = movie_frames.quantized_mask(this_looper).flatten() times=thtr.times[mask]+0 #the zero makes a copy times.shape=times.size,1 times=times/colors.tff G = colors.G #gx = thtr.track_dict['grav_x'] #gy = thtr.track_dict['grav_y'] #gz = thtr.track_dict['grav_z'] #GE2 = -1/(8*np.pi)*(gx*gx+gy*gy+gz*gz) #ge_min=GE2.min() #ge_max=GE2.max() PearsonR = np.zeros([len(core_list), len(times)]) PearsonP = np.zeros([len(core_list), len(times)]) PearsonRho = np.zeros([len(core_list), len(times)]) PeakRho = np.zeros([len(core_list), len(times)]) for nc, core_id in enumerate(core_list): print('GE pearson %s %d'%(name,core_id)) ms = trackage.mini_scrubber(thtr,core_id, do_velocity=False) #ms.particle_pos(core_id) if ms.nparticles < 1000: sl=slice(None) c=[0.5]*4 else: sl = slice(None,None,10) #c=[0,0,0,0.1] c=[0.1]*4 rho = ms.density[sl] rho = rho[:,mask] PeakRho[nc,:]=rho.max(axis=0) gx = thtr.c([core_id],'grav_x')[sl][:,mask] gy = thtr.c([core_id],'grav_y')[sl][:,mask] gz = thtr.c([core_id],'grav_z')[sl][:,mask] GE2 = 1/(8*np.pi*G)*(gx*gx+gy*gy+gz*gz) RRR = ms.r[sl][:,mask] for n in range(GE2.shape[1]): the_x=np.log(RRR[:,n]) the_y=np.log(GE2[:,n]) #the_y=rho[:,n] r,p=scipy.stats.pearsonr(the_x,the_y) PearsonR[nc,n]=r PearsonP[nc,n]=p the_y=np.log(rho[:,n]) r,p=scipy.stats.pearsonr(the_x,the_y) PearsonRho[nc,n]=r if 0: fig,ax=plt.subplots(1,2) ax[0].plot(times,PearsonR) #ax[0].boxplot(PearsonR) #ax[1].boxplot(PearsonRho) fig.savefig('plots_to_sort/phi_box_%s.png'%name) return {'PR':PearsonR, 'PP':PearsonP, 'Prho':PearsonRho, 'T':times, 'PeakRho':PeakRho} if 0: fig,ax=plt.subplots(1,1) ax.plot(times , GE2, c=c, linewidth=0.1) axbonk(ax,xlabel=r'$t/t_{ff}$', ylabel=r'$(\nabla \phi)^2/8 pi G$',yscale='log', ylim=[ge_min,ge_max]) ax2=ax.twinx() c=[1.0,0.1,0.1,0.1] ax2.plot(times , rho, c=c, linewidth=0.1) axbonk(ax2,xlabel=r'$t/t_{ff}$', ylabel=r'$\rho$',yscale='log') outname='plots_to_sort/%s_GE_t_c%04d.png'%(this_looper.sim_name,core_id) fig.savefig(outname) print(outname) sims=['u501', 'u502','u503'] if 'stuff' not in dir(): stuff={} for sim in sims: core_list = np.unique(TL.loops[sim].tr.core_ids) #core_list=core_list[:10] stuff[sim] = GE_pearson(TL.loops[sim],core_list=core_list) if 1: for sim in stuff: fig,ax=plt.subplots(1,1) T = stuff[sim]['T'] rho=stuff[sim]['PeakRho'] Rphi=stuff[sim]['PR'] ax.plot(Rphi.transpose() ,rho.transpose(),c=[0.1]*4) axbonk(ax,xlabel='time',ylabel='rho max', yscale='log') fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png'%sim) if 1: for sim in stuff: fig,ax=plt.subplots(1,1) T = stuff[sim]['T'] rho=stuff[sim]['PeakRho'] ax.plot(T,rho.transpose(),c=[0.1]*4) axbonk(ax,xlabel='time',ylabel='rho max', yscale='log') fig.savefig('plots_to_sort/peak_rho_%s.png'%sim) if 0: for sim in stuff: fig,ax=plt.subplots(1,1) c=[0.1]*4 #ax.plot( stuff[sim]['T'], stuff[sim]['PR'].transpose(),c=c) #ax.scatter( stuff[sim]['Prho'].transpose(), stuff[sim]['PR'].transpose(),c=c) XX,YY= stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten() ok = (~np.isnan(XX))*(~np.isnan(YY)) XX=XX[ok] YY=YY[ok] xbins = np.linspace( XX.min(), XX.max(), 64) ybins = np.linspace( YY.min(), YY.max(), 64) hist, xb, yb = np.histogram2d(XX,YY, bins=[xbins,ybins]) import pcolormesh_helper as pch pch.helper(hist,xb,yb,ax=ax) fig.savefig('plots_to_sort/RGE_Rrho_%s.png'%sim) if 1: for sim in stuff: fig,ax=plt.subplots(1,2) Rphi = stuff[sim]['PR'] ax[0].boxplot( Rphi ) ax[0].plot( Rphi.mean(axis=0)) ax[1].boxplot( stuff[sim]['Prho']) axbonk(ax[0],xlabel='frame',ylabel='Rgrad phi') axbonk(ax[1],xlabel='frame',ylabel='R rho') fig.savefig('plots_to_sort/Boxes_%s.png'%(sim)) if 0: from scipy.ndimage import gaussian_filter fig,ax=plt.subplots() for sim in stuff: Rphi = stuff[sim]['PR'] Rrho = stuff[sim]['Prho'] ax.plot( gaussian_filter(Rphi.mean(axis=0),1), colors.color[sim] +'--') ax.plot( Rrho.mean(axis=0), colors.color[sim]) axbonk(ax,xlabel='frame',ylabel='Rgrad phi') fig.savefig('plots_to_sort/MeanR_%s.png'%(sim))
[ "\nfrom starter2 import *\nfrom collections import defaultdict\nimport scipy\nimport colors\n\nimport hair_dryer\nreload(hair_dryer)\n\nimport three_loopers_u500 as TL\nimport movie_frames \n\ndef GE_pearson(this_looper,core_list=None):\n\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n\n name = this_looper.sim_name\n thtr=this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times=thtr.times[mask]+0 #the zero makes a copy\n times.shape=times.size,1\n times=times/colors.tff\n G = colors.G\n #gx = thtr.track_dict['grav_x']\n #gy = thtr.track_dict['grav_y']\n #gz = thtr.track_dict['grav_z']\n #GE2 = -1/(8*np.pi)*(gx*gx+gy*gy+gz*gz)\n #ge_min=GE2.min()\n #ge_max=GE2.max()\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d'%(name,core_id))\n\n \n ms = trackage.mini_scrubber(thtr,core_id, do_velocity=False)\n #ms.particle_pos(core_id)\n\n if ms.nparticles < 1000:\n sl=slice(None)\n c=[0.5]*4\n else:\n sl = slice(None,None,10)\n #c=[0,0,0,0.1]\n c=[0.1]*4\n\n rho = ms.density[sl]\n rho = rho[:,mask]\n\n PeakRho[nc,:]=rho.max(axis=0)\n\n gx = thtr.c([core_id],'grav_x')[sl][:,mask]\n gy = thtr.c([core_id],'grav_y')[sl][:,mask]\n gz = thtr.c([core_id],'grav_z')[sl][:,mask]\n GE2 = 1/(8*np.pi*G)*(gx*gx+gy*gy+gz*gz)\n\n RRR = ms.r[sl][:,mask]\n for n in range(GE2.shape[1]):\n the_x=np.log(RRR[:,n])\n the_y=np.log(GE2[:,n])\n #the_y=rho[:,n]\n r,p=scipy.stats.pearsonr(the_x,the_y)\n PearsonR[nc,n]=r\n PearsonP[nc,n]=p\n the_y=np.log(rho[:,n])\n r,p=scipy.stats.pearsonr(the_x,the_y)\n PearsonRho[nc,n]=r\n \n if 0:\n fig,ax=plt.subplots(1,2)\n ax[0].plot(times,PearsonR)\n #ax[0].boxplot(PearsonR)\n #ax[1].boxplot(PearsonRho)\n fig.savefig('plots_to_sort/phi_box_%s.png'%name)\n\n return {'PR':PearsonR, 'PP':PearsonP, 'Prho':PearsonRho, 'T':times, 'PeakRho':PeakRho}\n\n\n\n if 0:\n fig,ax=plt.subplots(1,1)\n ax.plot(times , GE2, c=c, linewidth=0.1)\n axbonk(ax,xlabel=r'$t/t_{ff}$', ylabel=r'$(\\nabla \\phi)^2/8 pi G$',yscale='log', ylim=[ge_min,ge_max])\n ax2=ax.twinx()\n c=[1.0,0.1,0.1,0.1]\n ax2.plot(times , rho, c=c, linewidth=0.1)\n axbonk(ax2,xlabel=r'$t/t_{ff}$', ylabel=r'$\\rho$',yscale='log')\n\n outname='plots_to_sort/%s_GE_t_c%04d.png'%(this_looper.sim_name,core_id)\n fig.savefig(outname)\n print(outname)\n\n\n\nsims=['u501', 'u502','u503']\nif 'stuff' not in dir():\n stuff={}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n #core_list=core_list[:10]\n stuff[sim] = GE_pearson(TL.loops[sim],core_list=core_list)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n T = stuff[sim]['T']\n rho=stuff[sim]['PeakRho']\n Rphi=stuff[sim]['PR']\n ax.plot(Rphi.transpose() ,rho.transpose(),c=[0.1]*4)\n axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png'%sim)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n T = stuff[sim]['T']\n rho=stuff[sim]['PeakRho']\n ax.plot(T,rho.transpose(),c=[0.1]*4)\n axbonk(ax,xlabel='time',ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png'%sim)\n\nif 0:\n for sim in stuff:\n fig,ax=plt.subplots(1,1)\n c=[0.1]*4\n #ax.plot( stuff[sim]['T'], stuff[sim]['PR'].transpose(),c=c)\n #ax.scatter( stuff[sim]['Prho'].transpose(), stuff[sim]['PR'].transpose(),c=c)\n XX,YY= stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = (~np.isnan(XX))*(~np.isnan(YY))\n XX=XX[ok]\n YY=YY[ok]\n xbins = np.linspace( XX.min(), XX.max(), 64)\n ybins = np.linspace( YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX,YY, bins=[xbins,ybins])\n import pcolormesh_helper as pch\n pch.helper(hist,xb,yb,ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png'%sim)\n\nif 1:\n for sim in stuff:\n fig,ax=plt.subplots(1,2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot( Rphi )\n ax[0].plot( Rphi.mean(axis=0))\n ax[1].boxplot( stuff[sim]['Prho'])\n\n\n axbonk(ax[0],xlabel='frame',ylabel='Rgrad phi')\n axbonk(ax[1],xlabel='frame',ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png'%(sim))\n\n\nif 0:\n from scipy.ndimage import gaussian_filter\n fig,ax=plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot( gaussian_filter(Rphi.mean(axis=0),1), colors.color[sim] +'--')\n ax.plot( Rrho.mean(axis=0), colors.color[sim])\n\n\n axbonk(ax,xlabel='frame',ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png'%(sim))\n\n", "from starter2 import *\nfrom collections import defaultdict\nimport scipy\nimport colors\nimport hair_dryer\nreload(hair_dryer)\nimport three_loopers_u500 as TL\nimport movie_frames\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\nsims = ['u501', 'u502', 'u503']\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n ax[0].plot(Rphi.mean(axis=0))\n ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n ax.plot(Rrho.mean(axis=0), colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n", "<import token>\nreload(hair_dryer)\n<import token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\nsims = ['u501', 'u502', 'u503']\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n ax[0].plot(Rphi.mean(axis=0))\n ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n ax.plot(Rrho.mean(axis=0), colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n", "<import token>\nreload(hair_dryer)\n<import token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\n<assignment token>\nif 'stuff' not in dir():\n stuff = {}\n for sim in sims:\n core_list = np.unique(TL.loops[sim].tr.core_ids)\n stuff[sim] = GE_pearson(TL.loops[sim], core_list=core_list)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n Rphi = stuff[sim]['PR']\n ax.plot(Rphi.transpose(), rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_pearson_phi%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n T = stuff[sim]['T']\n rho = stuff[sim]['PeakRho']\n ax.plot(T, rho.transpose(), c=[0.1] * 4)\n axbonk(ax, xlabel='time', ylabel='rho max', yscale='log')\n fig.savefig('plots_to_sort/peak_rho_%s.png' % sim)\nif 0:\n for sim in stuff:\n fig, ax = plt.subplots(1, 1)\n c = [0.1] * 4\n XX, YY = stuff[sim]['Prho'].flatten(), stuff[sim]['PR'].flatten()\n ok = ~np.isnan(XX) * ~np.isnan(YY)\n XX = XX[ok]\n YY = YY[ok]\n xbins = np.linspace(XX.min(), XX.max(), 64)\n ybins = np.linspace(YY.min(), YY.max(), 64)\n hist, xb, yb = np.histogram2d(XX, YY, bins=[xbins, ybins])\n import pcolormesh_helper as pch\n pch.helper(hist, xb, yb, ax=ax)\n fig.savefig('plots_to_sort/RGE_Rrho_%s.png' % sim)\nif 1:\n for sim in stuff:\n fig, ax = plt.subplots(1, 2)\n Rphi = stuff[sim]['PR']\n ax[0].boxplot(Rphi)\n ax[0].plot(Rphi.mean(axis=0))\n ax[1].boxplot(stuff[sim]['Prho'])\n axbonk(ax[0], xlabel='frame', ylabel='Rgrad phi')\n axbonk(ax[1], xlabel='frame', ylabel='R rho')\n fig.savefig('plots_to_sort/Boxes_%s.png' % sim)\nif 0:\n from scipy.ndimage import gaussian_filter\n fig, ax = plt.subplots()\n for sim in stuff:\n Rphi = stuff[sim]['PR']\n Rrho = stuff[sim]['Prho']\n ax.plot(gaussian_filter(Rphi.mean(axis=0), 1), colors.color[sim] + '--'\n )\n ax.plot(Rrho.mean(axis=0), colors.color[sim])\n axbonk(ax, xlabel='frame', ylabel='Rgrad phi')\n fig.savefig('plots_to_sort/MeanR_%s.png' % sim)\n", "<import token>\n<code token>\n<import token>\n\n\ndef GE_pearson(this_looper, core_list=None):\n if core_list is None:\n core_list = np.unique(this_looper.tr.core_ids)\n name = this_looper.sim_name\n thtr = this_looper.tr\n mask = movie_frames.quantized_mask(this_looper).flatten()\n times = thtr.times[mask] + 0\n times.shape = times.size, 1\n times = times / colors.tff\n G = colors.G\n PearsonR = np.zeros([len(core_list), len(times)])\n PearsonP = np.zeros([len(core_list), len(times)])\n PearsonRho = np.zeros([len(core_list), len(times)])\n PeakRho = np.zeros([len(core_list), len(times)])\n for nc, core_id in enumerate(core_list):\n print('GE pearson %s %d' % (name, core_id))\n ms = trackage.mini_scrubber(thtr, core_id, do_velocity=False)\n if ms.nparticles < 1000:\n sl = slice(None)\n c = [0.5] * 4\n else:\n sl = slice(None, None, 10)\n c = [0.1] * 4\n rho = ms.density[sl]\n rho = rho[:, mask]\n PeakRho[nc, :] = rho.max(axis=0)\n gx = thtr.c([core_id], 'grav_x')[sl][:, mask]\n gy = thtr.c([core_id], 'grav_y')[sl][:, mask]\n gz = thtr.c([core_id], 'grav_z')[sl][:, mask]\n GE2 = 1 / (8 * np.pi * G) * (gx * gx + gy * gy + gz * gz)\n RRR = ms.r[sl][:, mask]\n for n in range(GE2.shape[1]):\n the_x = np.log(RRR[:, n])\n the_y = np.log(GE2[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonR[nc, n] = r\n PearsonP[nc, n] = p\n the_y = np.log(rho[:, n])\n r, p = scipy.stats.pearsonr(the_x, the_y)\n PearsonRho[nc, n] = r\n if 0:\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(times, PearsonR)\n fig.savefig('plots_to_sort/phi_box_%s.png' % name)\n return {'PR': PearsonR, 'PP': PearsonP, 'Prho': PearsonRho, 'T': times,\n 'PeakRho': PeakRho}\n if 0:\n fig, ax = plt.subplots(1, 1)\n ax.plot(times, GE2, c=c, linewidth=0.1)\n axbonk(ax, xlabel='$t/t_{ff}$', ylabel='$(\\\\nabla \\\\phi)^2/8 pi G$',\n yscale='log', ylim=[ge_min, ge_max])\n ax2 = ax.twinx()\n c = [1.0, 0.1, 0.1, 0.1]\n ax2.plot(times, rho, c=c, linewidth=0.1)\n axbonk(ax2, xlabel='$t/t_{ff}$', ylabel='$\\\\rho$', yscale='log')\n outname = 'plots_to_sort/%s_GE_t_c%04d.png' % (this_looper.sim_name,\n core_id)\n fig.savefig(outname)\n print(outname)\n\n\n<assignment token>\n<code token>\n", "<import token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n" ]
false
393
7bbbd30ba1578c1165ccf5c2fff22609c16dfd64
""" Cores no terminal """ a = 3 b = 5 print('Os valores são \033[32m{}\033[m e \033[31m{}\033[m !!!'.format(a, b)) # Dicionário de cores: nome = 'Kátia' cores = {'limpa':'\033]m', 'azul':'\033[34m', 'amarelo':'\033[33m', 'pretoebranco':'\033[7;30m'} print('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores['amarelo'])) # dá pra colocar as cores dentro das chaves tb.
[ "\"\"\"\r\nCores no terminal\r\n\"\"\"\r\n\r\na = 3\r\nb = 5\r\nprint('Os valores são \\033[32m{}\\033[m e \\033[31m{}\\033[m !!!'.format(a, b))\r\n\r\n# Dicionário de cores:\r\nnome = 'Kátia'\r\ncores = {'limpa':'\\033]m',\r\n 'azul':'\\033[34m',\r\n 'amarelo':'\\033[33m',\r\n 'pretoebranco':'\\033[7;30m'}\r\n\r\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores['amarelo']))\r\n# dá pra colocar as cores dentro das chaves tb.\r\n", "<docstring token>\na = 3\nb = 5\nprint('Os valores são \\x1b[32m{}\\x1b[m e \\x1b[31m{}\\x1b[m !!!'.format(a, b))\nnome = 'Kátia'\ncores = {'limpa': '\\x1b]m', 'azul': '\\x1b[34m', 'amarelo': '\\x1b[33m',\n 'pretoebranco': '\\x1b[7;30m'}\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[\n 'amarelo']))\n", "<docstring token>\n<assignment token>\nprint('Os valores são \\x1b[32m{}\\x1b[m e \\x1b[31m{}\\x1b[m !!!'.format(a, b))\n<assignment token>\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[\n 'amarelo']))\n", "<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
394
8ca77ed608108a9aa693acb686156e661794d7ab
# A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. # For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, # which means that 28 is a perfect number. # # A number whose proper divisors are less than the number is called deficient and # a number whose proper divisors exceed the number is called abundant. # # As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, # the smallest number that can be written as the sum of two abundant numbers is 24. # By mathematical analysis, it can be shown that all integers greater than 28123 # can be written as the sum of two abundant numbers. # However, this upper limit cannot be reduced any further by analysis even though # it is known that the greatest number that cannot be expressed as the sum of two abundant numbers # is less than this limit. # # Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers. UPPER_LIMIT = 28124 import math import cProfile from bisect import bisect def sum_divisors(N): total = 1 for i in xrange(2, math.sqrt(N)+1): if (N % i == 0): total += i if ((i * i) != N): total += (N / i) return total abundant = [] for i in xrange(11, UPPER_LIMIT): if (sum_divisors(i) > i): abundant.append(i) print "found: ", len(abundant), " abundant numbers less than ", UPPER_LIMIT print "highest abundant number: ", abundant[-1] # Smart: compute all the sums of the abundant numbers we have. Store everything in an array. def AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers(): # Create an array that is zero everywhere, then punch out the number # that are expressible as the sum of two abundant numbers integers = [0] * UPPER_LIMIT for i in xrange(0, len(abundant)): for j in xrange(i, len(abundant)): addend = abundant[i] + abundant[j] if (addend < UPPER_LIMIT): integers[addend] = 1 else: break; #don't bother going this high # We've filled in the array. Now do the sum return sum(i for i in xrange(0, UPPER_LIMIT) if integers[i] == 0) #cProfile.run('AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()') print AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers() # Somebody else (norvig) did this, which is really slick! def norvig(): abundants = set(i for i in range(1,28124) if sum_divisors(i) > i) def abundantsum(i): return any(i-a in abundants for a in abundants) return sum(i for i in range(1,28124) if not abundantsum(i))
[ "# A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. \r\n# For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, \r\n# which means that 28 is a perfect number.\r\n#\r\n# A number whose proper divisors are less than the number is called deficient and \r\n# a number whose proper divisors exceed the number is called abundant.\r\n#\r\n# As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, \r\n# the smallest number that can be written as the sum of two abundant numbers is 24. \r\n# By mathematical analysis, it can be shown that all integers greater than 28123 \r\n# can be written as the sum of two abundant numbers. \r\n# However, this upper limit cannot be reduced any further by analysis even though\r\n# it is known that the greatest number that cannot be expressed as the sum of two abundant numbers \r\n# is less than this limit.\r\n#\r\n# Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.\r\n\r\nUPPER_LIMIT = 28124\r\n\r\nimport math\r\nimport cProfile\r\nfrom bisect import bisect\r\ndef sum_divisors(N):\r\n total = 1\r\n for i in xrange(2, math.sqrt(N)+1):\r\n if (N % i == 0):\r\n total += i\r\n if ((i * i) != N):\r\n total += (N / i)\r\n return total\r\n\r\nabundant = []\r\nfor i in xrange(11, UPPER_LIMIT):\r\n if (sum_divisors(i) > i):\r\n abundant.append(i)\r\n\r\n\r\nprint \"found: \", len(abundant), \" abundant numbers less than \", UPPER_LIMIT\r\nprint \"highest abundant number: \", abundant[-1]\r\n\r\n# Smart: compute all the sums of the abundant numbers we have. Store everything in an array.\r\ndef AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers():\r\n # Create an array that is zero everywhere, then punch out the number\r\n # that are expressible as the sum of two abundant numbers\r\n integers = [0] * UPPER_LIMIT\r\n for i in xrange(0, len(abundant)):\r\n for j in xrange(i, len(abundant)):\r\n addend = abundant[i] + abundant[j]\r\n if (addend < UPPER_LIMIT):\r\n integers[addend] = 1\r\n else:\r\n break; #don't bother going this high\r\n\r\n # We've filled in the array. Now do the sum\r\n return sum(i for i in xrange(0, UPPER_LIMIT) if integers[i] == 0)\r\n\r\n#cProfile.run('AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()')\r\nprint AddIntegersNotExpressibleAsTheSumOfTwoAbundantNumbers()\r\n\r\n\r\n# Somebody else (norvig) did this, which is really slick!\r\ndef norvig():\r\n abundants = set(i for i in range(1,28124) if sum_divisors(i) > i)\r\n def abundantsum(i):\r\n return any(i-a in abundants for a in abundants)\r\n return sum(i for i in range(1,28124) if not abundantsum(i))\r\n\r\n \r\n \r\n" ]
true
395
b5ac3695a224d531f5baa53a07d3c894d44e8c4c
import matplotlib.pyplot as plt Ci_MSB = [32,16,8,4,2,1] Ci_LSB = [16,8,4,2,1] CB = 1 CP_B = 0 CP_LSB = (32-1)*(CB+CP_B-1)+10 print(CP_LSB) CP_MSB = 0 Csum_LSB = sum(Ci_LSB)+CP_LSB Csum_MSB = sum(Ci_MSB)+CP_MSB Cx = Csum_LSB*Csum_MSB+(CB+CP_B)*Csum_LSB+(CB+CP_B)*Csum_MSB Wi_MSB = [Ci_MSB[i]*(CB+CP_B+Csum_LSB)/Cx for i in range (6)] Wi_LSB = [Ci_LSB[i]*(CB+CP_B)/Cx for i in range (5)] print(Wi_MSB) print(Wi_LSB) def AtoD(vin): code = [0 for i in range(12)] code[0] = 1 if vin > 0 else 0 for i in range(6): vin = vin - Wi_MSB[i] * (code[i]-0.5)*2 code[i+1] = 1 if vin > 0 else 0 for i in range(5): vin = vin - Wi_LSB[i] * (code[i+6]-0.5)*2 code[i + 7] = 1 if vin > 0 else 0 dec_num = 0 for b in code: dec_num = dec_num * 2 + b return dec_num print(AtoD(0.50)) def DtoA_ideal(code): v = -1.0 for i in range(12): v += 2**(11-i)*code[i]/2048 return v print(DtoA_ideal([1,1,1,1,1,1,1,1,1,1,1,1])) n=1000000 x = [-1+i/n for i in range(2*n+1)] y = [AtoD(v) for v in x] # print(y[int(n/6):int(n/6)+100]) bin_num = [i for i in range(4096)] bin_size = [0 for i in range(4096)] left = x[0] for i in range(2*n): if y[i+1]!=y[i]: bin_size[y[i]] = x[i+1] - left left = x[i+1] # print(bin_size) DNL = [data*2047 -1 for data in bin_size] plt.plot(bin_num[1:4094],DNL[1:4094]) # plt.xlim(1000,1005) plt.show() # y = [DtoA_ideal(AtoD(v)) for v in x] # plt.plot(x,y) # plt.xlim(-0.01,0) # plt.ylim(-0.01,0) # plt.show() # def Vout(index): # V = 0.0 # for i in range(6): # V = V + Wi_MSB[i] * int(format(index,'b').zfill(11)[i])*1 # for i in range(5): # V = V + Wi_LSB[i] * int(format(index,'b').zfill(11)[i+6])*1 # return V # print(Vout(2047)) # # x = [i for i in range(2048)] # y = [Vout(i) for i in range(2048)] # DNL = [0]+[y[i+1]-y[i]-Vout(2047)/2047 for i in range(2047)] # DNL = [data*2048 for data in DNL] # INL = [y[i] -i*Vout(2047)/2047 for i in range (2048)] # INL = [data*2048 for data in INL] # # plt.plot(x,DNL) # plt.show()
[ "import matplotlib.pyplot as plt\n\nCi_MSB = [32,16,8,4,2,1]\nCi_LSB = [16,8,4,2,1]\nCB = 1\nCP_B = 0\nCP_LSB = (32-1)*(CB+CP_B-1)+10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB)+CP_LSB\nCsum_MSB = sum(Ci_MSB)+CP_MSB\nCx = Csum_LSB*Csum_MSB+(CB+CP_B)*Csum_LSB+(CB+CP_B)*Csum_MSB\nWi_MSB = [Ci_MSB[i]*(CB+CP_B+Csum_LSB)/Cx for i in range (6)]\nWi_LSB = [Ci_LSB[i]*(CB+CP_B)/Cx for i in range (5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\ndef AtoD(vin):\n code = [0 for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i]-0.5)*2\n code[i+1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i+6]-0.5)*2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\nprint(AtoD(0.50))\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2**(11-i)*code[i]/2048\n return v\nprint(DtoA_ideal([1,1,1,1,1,1,1,1,1,1,1,1]))\n\n\nn=1000000\nx = [-1+i/n for i in range(2*n+1)]\ny = [AtoD(v) for v in x]\n# print(y[int(n/6):int(n/6)+100])\n\nbin_num = [i for i in range(4096)]\nbin_size = [0 for i in range(4096)]\n\nleft = x[0]\nfor i in range(2*n):\n if y[i+1]!=y[i]:\n bin_size[y[i]] = x[i+1] - left\n left = x[i+1]\n# print(bin_size)\nDNL = [data*2047 -1 for data in bin_size]\nplt.plot(bin_num[1:4094],DNL[1:4094])\n# plt.xlim(1000,1005)\n\nplt.show()\n\n\n\n\n\n# y = [DtoA_ideal(AtoD(v)) for v in x]\n# plt.plot(x,y)\n# plt.xlim(-0.01,0)\n# plt.ylim(-0.01,0)\n# plt.show()\n# def Vout(index):\n# V = 0.0\n# for i in range(6):\n# V = V + Wi_MSB[i] * int(format(index,'b').zfill(11)[i])*1\n# for i in range(5):\n# V = V + Wi_LSB[i] * int(format(index,'b').zfill(11)[i+6])*1\n# return V\n# print(Vout(2047))\n#\n# x = [i for i in range(2048)]\n# y = [Vout(i) for i in range(2048)]\n# DNL = [0]+[y[i+1]-y[i]-Vout(2047)/2047 for i in range(2047)]\n# DNL = [data*2048 for data in DNL]\n# INL = [y[i] -i*Vout(2047)/2047 for i in range (2048)]\n# INL = [data*2048 for data in INL]\n#\n# plt.plot(x,DNL)\n# plt.show()\n", "import matplotlib.pyplot as plt\nCi_MSB = [32, 16, 8, 4, 2, 1]\nCi_LSB = [16, 8, 4, 2, 1]\nCB = 1\nCP_B = 0\nCP_LSB = (32 - 1) * (CB + CP_B - 1) + 10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB) + CP_LSB\nCsum_MSB = sum(Ci_MSB) + CP_MSB\nCx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB\nWi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)]\nWi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\nn = 1000000\nx = [(-1 + i / n) for i in range(2 * n + 1)]\ny = [AtoD(v) for v in x]\nbin_num = [i for i in range(4096)]\nbin_size = [(0) for i in range(4096)]\nleft = x[0]\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\nDNL = [(data * 2047 - 1) for data in bin_size]\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "<import token>\nCi_MSB = [32, 16, 8, 4, 2, 1]\nCi_LSB = [16, 8, 4, 2, 1]\nCB = 1\nCP_B = 0\nCP_LSB = (32 - 1) * (CB + CP_B - 1) + 10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB) + CP_LSB\nCsum_MSB = sum(Ci_MSB) + CP_MSB\nCx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB\nWi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)]\nWi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\nn = 1000000\nx = [(-1 + i / n) for i in range(2 * n + 1)]\ny = [AtoD(v) for v in x]\nbin_num = [i for i in range(4096)]\nbin_size = [(0) for i in range(4096)]\nleft = x[0]\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\nDNL = [(data * 2047 - 1) for data in bin_size]\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "<import token>\n<assignment token>\nprint(CP_LSB)\n<assignment token>\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\n<assignment token>\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\n<assignment token>\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\n<code token>\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false
396
c9d12f14fa0e46e4590746d45862fe255b415a1d
# vim: expandtab # -*- coding: utf-8 -*- from poleno.utils.template import Library from chcemvediet.apps.obligees.models import Obligee register = Library() @register.simple_tag def gender(gender, masculine, feminine, neuter, plurale): if gender == Obligee.GENDERS.MASCULINE: return masculine elif gender == Obligee.GENDERS.FEMININE: return feminine elif gender == Obligee.GENDERS.NEUTER: return neuter elif gender == Obligee.GENDERS.PLURALE: return plurale else: return u''
[ "# vim: expandtab\n# -*- coding: utf-8 -*-\nfrom poleno.utils.template import Library\nfrom chcemvediet.apps.obligees.models import Obligee\n\n\nregister = Library()\n\[email protected]_tag\ndef gender(gender, masculine, feminine, neuter, plurale):\n if gender == Obligee.GENDERS.MASCULINE:\n return masculine\n elif gender == Obligee.GENDERS.FEMININE:\n return feminine\n elif gender == Obligee.GENDERS.NEUTER:\n return neuter\n elif gender == Obligee.GENDERS.PLURALE:\n return plurale\n else:\n return u''\n", "from poleno.utils.template import Library\nfrom chcemvediet.apps.obligees.models import Obligee\nregister = Library()\n\n\[email protected]_tag\ndef gender(gender, masculine, feminine, neuter, plurale):\n if gender == Obligee.GENDERS.MASCULINE:\n return masculine\n elif gender == Obligee.GENDERS.FEMININE:\n return feminine\n elif gender == Obligee.GENDERS.NEUTER:\n return neuter\n elif gender == Obligee.GENDERS.PLURALE:\n return plurale\n else:\n return u''\n", "<import token>\nregister = Library()\n\n\[email protected]_tag\ndef gender(gender, masculine, feminine, neuter, plurale):\n if gender == Obligee.GENDERS.MASCULINE:\n return masculine\n elif gender == Obligee.GENDERS.FEMININE:\n return feminine\n elif gender == Obligee.GENDERS.NEUTER:\n return neuter\n elif gender == Obligee.GENDERS.PLURALE:\n return plurale\n else:\n return u''\n", "<import token>\n<assignment token>\n\n\[email protected]_tag\ndef gender(gender, masculine, feminine, neuter, plurale):\n if gender == Obligee.GENDERS.MASCULINE:\n return masculine\n elif gender == Obligee.GENDERS.FEMININE:\n return feminine\n elif gender == Obligee.GENDERS.NEUTER:\n return neuter\n elif gender == Obligee.GENDERS.PLURALE:\n return plurale\n else:\n return u''\n", "<import token>\n<assignment token>\n<function token>\n" ]
false
397
58e023c3c453d1e190fdb5bc457358f42d1bd93f
# https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/ # BruteForce class BruteForceSolution: def smallerNumbersThanCurrent(self, nums): answer = [] for num in nums: counter = 0 for i in range(len(nums)): if nums[i] < num: counter += 1 answer.append(counter) return answer class Solution: def smallerNumbersThanCurrent(self, nums): answer = [] sortedNums = sorted(nums) for num in nums: answer.append(sortedNums.index(num)) return answer example = BruteForceSolution() exampleTwo = Solution() print(example.smallerNumbersThanCurrent([8,1,2,2,3])) print(exampleTwo.smallerNumbersThanCurrent([8,1,2,2,3]))
[ "# https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/\n\n# BruteForce\n\nclass BruteForceSolution:\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n \n \n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n \n return answer\n\nclass Solution:\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n \n sortedNums = sorted(nums)\n \n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n \n \n \n \n \n \nexample = BruteForceSolution()\nexampleTwo = Solution()\n\n\nprint(example.smallerNumbersThanCurrent([8,1,2,2,3]))\n\nprint(exampleTwo.smallerNumbersThanCurrent([8,1,2,2,3]))\n", "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\nexample = BruteForceSolution()\nexampleTwo = Solution()\nprint(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\nprint(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\n", "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<assignment token>\nprint(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\nprint(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\n", "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<assignment token>\n<code token>\n", "class BruteForceSolution:\n <function token>\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<assignment token>\n<code token>\n", "<class token>\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<assignment token>\n<code token>\n", "<class token>\n\n\nclass Solution:\n <function token>\n\n\n<assignment token>\n<code token>\n", "<class token>\n<class token>\n<assignment token>\n<code token>\n" ]
false
398
b4b7e20c9558bd1b29a1c1fa24bfca8a2d292b27
import xml.etree.ElementTree as ET #tree = ET.parse('rutas/rutas_prueba.xml') #treeToAdd = ET.parse('rutas/rutas_prueba_agregar.xml') #root = tree.getroot() #git rootToAdd = treeToAdd.getroot() #for child in root: # for test in child: # print(test.tag, test.attrib) #for elem in root.iter(): # print(elem.tag) #prueba = [elem.tag for elem in root.iter()] #print(prueba) #print(ET.tostring(root, encoding='utf8').decode('utf8')) # for elem in rootToAdd: # root.append(elem) # # tree.write('rutas/probando_agregados.xml') #get the tree for each routes file rutas0k_10k = ET.parse('rutas/rutas0k-10k.xml') rutas10k_30k = ET.parse('rutas/rutas10k-30k.xml') rutas30k_50k = ET.parse('rutas/rutas30k-50k.xml') rutas50k_70k = ET.parse('rutas/rutas50k-70k.xml') rutas70k_90k = ET.parse('rutas/rutas70k-90k.xml') rutas90k_110k = ET.parse('rutas/rutas90k-110k.xml') #root for each routes tree root1 = rutas0k_10k.getroot() root2 = rutas10k_30k.getroot() root3 = rutas30k_50k.getroot() root4 = rutas50k_70k.getroot() root5 = rutas70k_90k.getroot() root6 = rutas90k_110k.getroot() #each root except first root rootsToAdd = [root2,root3,root4,root5,root6] #add each element to the first tree for root in rootsToAdd: for elem in root: root1.append(elem) #write the tree to a new file rutas0k_10k.write('rutas/rutas0k-110k.xml')
[ "import xml.etree.ElementTree as ET\n\n#tree = ET.parse('rutas/rutas_prueba.xml')\n#treeToAdd = ET.parse('rutas/rutas_prueba_agregar.xml')\n\n#root = tree.getroot()\n\n#git rootToAdd = treeToAdd.getroot()\n\n#for child in root:\n# for test in child:\n# print(test.tag, test.attrib)\n\n\n#for elem in root.iter():\n# print(elem.tag)\n\n#prueba = [elem.tag for elem in root.iter()]\n#print(prueba)\n#print(ET.tostring(root, encoding='utf8').decode('utf8'))\n\n# for elem in rootToAdd:\n# root.append(elem)\n#\n# tree.write('rutas/probando_agregados.xml')\n\n#get the tree for each routes file\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\n\n#root for each routes tree\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\n\n#each root except first root\nrootsToAdd = [root2,root3,root4,root5,root6]\n\n#add each element to the first tree\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\n\n#write the tree to a new file\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n\n\n", "import xml.etree.ElementTree as ET\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\nrootsToAdd = [root2, root3, root4, root5, root6]\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n", "<import token>\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\nrootsToAdd = [root2, root3, root4, root5, root6]\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n", "<import token>\n<assignment token>\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n", "<import token>\n<assignment token>\n<code token>\n" ]
false
399
97bbb181cbc0f5bfbf0b2298133fc226b6217d91
import tensorflow as tf from tensorflow.python.framework import graph_util from net import siameseNet_batchnorm as siameseNet import dataset import numpy as np import cv2 import os batch_size=64 input_height=32 input_width=32 total_epoch_num=50 snapshot=100 support_image_extensions=[".jpg",".png",".jpeg",".bmp"] margin=1.0 channals=3 train_image_root="D:/forTensorflow/charRecTrain/forMyDNNCode/train" test_image_root="D:/forTensorflow/charRecTrain/forMyDNNCode/test" model_path="models/" pb_path=os.path.join(model_path,"pb/") ckpt_path=os.path.join(model_path,"ckpt/") if not os.path.exists(pb_path): os.makedirs(pb_path) if not os.path.exists(ckpt_path): os.makedirs(ckpt_path) model_name="siamese_triplet_28out_allloss_bn" if __name__ == '__main__': # image_paths,labels=get_images_path(test_image_root) # data=next_batch(True,None,image_paths,labels) # for left,right,label in zip(*data): # cv2.imshow("left",left) # cv2.imshow("right", right) # print(label) # cv2.waitKey(0) first_shape=None anchor_placeholder = tf.placeholder(tf.float32,shape=[first_shape,input_height,input_width,channals],name="anchor") similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name="similar") dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name="dissimilar") labels_placeholder = tf.placeholder(tf.float32, shape= [None if first_shape is None else first_shape * 3, ], name="labels") is_training_placeholder = tf.placeholder_with_default(False, shape=(), name="is_training") siamese_net=siameseNet.siameseNet() anchor = siamese_net.inference(anchor_placeholder,reuse=False,is_training=is_training_placeholder) similar = siamese_net.inference(similar_placeholder,reuse=True,is_training=is_training_placeholder) dissimilar = siamese_net.inference(dissimilar_placeholder,reuse=True,is_training=is_training_placeholder) loss,pos_dist,neg_dist = siamese_net.loss(anchor,similar,dissimilar,labels_placeholder,margin) flatten_out_anchor = tf.identity(anchor, name="flatten_anchor") flatten_out_similar = tf.identity(similar, name="flatten_similar") flatten_out_dissimilar = tf.identity(dissimilar, name="flatten_dissimilar") update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) global_step = tf.Variable(0, trainable=False) # learning_rate = tf.train.exponential_decay(0.01, global_step, 100, 0.9) # optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9) with tf.control_dependencies([tf.group(*update_ops)]): # train_step = optimizer.minimize(loss, global_step) train_step = tf.train.MomentumOptimizer(0.01, 0.90).\ minimize(loss, global_step=global_step) var_list = tf.trainable_variables() if global_step is not None: var_list.append(global_step) g_list = tf.global_variables() # 从全局变量中获得batch norm的缩放和偏差 bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name] bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name] var_list += bn_moving_vars ckpt_saver = tf.train.Saver() train_dataset = dataset.dataset(train_image_root,batch_size,support_image_extensions, input_height,input_width,channals) test_dataset = dataset.dataset(test_image_root, batch_size, support_image_extensions, input_height, input_width, channals) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # if os.path.exists(os.path.join(ckpt_path, "checkpoint")): # ckpt_saver.restore(sess, tf.train.latest_checkpoint(ckpt_path)) total_iters_num = 0 for epoch_num in range(total_epoch_num): train_images_num = train_dataset.sample_len cur_epoch_iters_num = train_images_num // batch_size for iters_num in range(cur_epoch_iters_num): train_anchor, train_similar, train_dissimilar,train_labels = \ train_dataset.next_triplet_batch() test_anchor, test_similar, test_dissimilar,test_labels = \ test_dataset.next_triplet_batch() if train_anchor is None or test_anchor is None: continue train_dict = {anchor_placeholder: train_anchor, similar_placeholder: train_similar, dissimilar_placeholder: train_dissimilar, labels_placeholder:train_labels, is_training_placeholder:True} test_dict = {anchor_placeholder: test_anchor, similar_placeholder: test_similar, dissimilar_placeholder: test_dissimilar, labels_placeholder:test_labels, is_training_placeholder: False} _,_global_step=sess.run([train_step,global_step], feed_dict=train_dict) anchor_out,similar_out,dissimilar_out = sess.run([ flatten_out_anchor,flatten_out_similar,flatten_out_dissimilar], feed_dict=train_dict) _train_loss,_train_pos_dist,_train_neg_dist = \ sess.run([loss,pos_dist,neg_dist], feed_dict=train_dict) _test_loss,_test_pos_dist,_test_neg_dist =\ sess.run([loss,pos_dist,neg_dist], feed_dict=test_dict) print("distance:",list(zip(_train_pos_dist.flatten(),_train_neg_dist.flatten()))[:5]) one_moving_meaning_show = "No mean or variance" if len(bn_moving_vars) > 0: one_moving_meaning = sess.graph.get_tensor_by_name(bn_moving_vars[0].name) one_moving_meaning_show = "{}={}".\ format(bn_moving_vars[0].name,np.mean(one_moving_meaning.eval())) print(one_moving_meaning_show) show_text = "epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}".format \ (epoch_num, iters_num + 1, _global_step, _train_loss, "0.99", _test_loss) print(show_text) if _global_step % snapshot == 0: # 保存PB constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["flatten_anchor"]) save_model_name=model_name + "-" + str(_global_step) + ".pb" with tf.gfile.FastGFile(pb_path + save_model_name, mode="wb") as fw: fw.write(constant_graph.SerializeToString()) # 保存CKPT ckpt_saver.save(sess, ckpt_path + model_name + ".ckpt", global_step=total_iters_num) print("Successfully saved model {}".format(save_model_name))
[ "import tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom net import siameseNet_batchnorm as siameseNet\nimport dataset\nimport numpy as np\nimport cv2\nimport os\n\nbatch_size=64\ninput_height=32\ninput_width=32\ntotal_epoch_num=50\nsnapshot=100\nsupport_image_extensions=[\".jpg\",\".png\",\".jpeg\",\".bmp\"]\nmargin=1.0\nchannals=3\n\ntrain_image_root=\"D:/forTensorflow/charRecTrain/forMyDNNCode/train\"\ntest_image_root=\"D:/forTensorflow/charRecTrain/forMyDNNCode/test\"\n\nmodel_path=\"models/\"\npb_path=os.path.join(model_path,\"pb/\")\nckpt_path=os.path.join(model_path,\"ckpt/\")\n\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name=\"siamese_triplet_28out_allloss_bn\"\n\nif __name__ == '__main__':\n # image_paths,labels=get_images_path(test_image_root)\n # data=next_batch(True,None,image_paths,labels)\n # for left,right,label in zip(*data):\n # cv2.imshow(\"left\",left)\n # cv2.imshow(\"right\", right)\n # print(label)\n # cv2.waitKey(0)\n\n first_shape=None\n anchor_placeholder = tf.placeholder(tf.float32,shape=[first_shape,input_height,input_width,channals],name=\"anchor\")\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name=\"similar\")\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name=\"dissimilar\")\n labels_placeholder = tf.placeholder(tf.float32, shape=\n [None if first_shape is None else first_shape * 3, ], name=\"labels\")\n is_training_placeholder = tf.placeholder_with_default(False, shape=(), name=\"is_training\")\n siamese_net=siameseNet.siameseNet()\n\n anchor = siamese_net.inference(anchor_placeholder,reuse=False,is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder,reuse=True,is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder,reuse=True,is_training=is_training_placeholder)\n loss,pos_dist,neg_dist = siamese_net.loss(anchor,similar,dissimilar,labels_placeholder,margin)\n\n flatten_out_anchor = tf.identity(anchor, name=\"flatten_anchor\")\n flatten_out_similar = tf.identity(similar, name=\"flatten_similar\")\n flatten_out_dissimilar = tf.identity(dissimilar, name=\"flatten_dissimilar\")\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n # learning_rate = tf.train.exponential_decay(0.01, global_step, 100, 0.9)\n # optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)\n\n with tf.control_dependencies([tf.group(*update_ops)]):\n # train_step = optimizer.minimize(loss, global_step)\n train_step = tf.train.MomentumOptimizer(0.01, 0.90).\\\n minimize(loss, global_step=global_step)\n\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables() # 从全局变量中获得batch norm的缩放和偏差\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root,batch_size,support_image_extensions,\n input_height,input_width,channals)\n\n test_dataset = dataset.dataset(test_image_root, batch_size, support_image_extensions,\n input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # if os.path.exists(os.path.join(ckpt_path, \"checkpoint\")):\n # ckpt_saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))\n\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n\n train_anchor, train_similar, train_dissimilar,train_labels = \\\n train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar,test_labels = \\\n test_dataset.next_triplet_batch()\n\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n\t\t\t\t\t\t\t labels_placeholder:train_labels,\n is_training_placeholder:True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n\t\t\t\t\t\t\t labels_placeholder:test_labels,\n is_training_placeholder: False}\n _,_global_step=sess.run([train_step,global_step], feed_dict=train_dict)\n\n anchor_out,similar_out,dissimilar_out = sess.run([\n flatten_out_anchor,flatten_out_similar,flatten_out_dissimilar],\n feed_dict=train_dict)\n\n _train_loss,_train_pos_dist,_train_neg_dist = \\\n sess.run([loss,pos_dist,neg_dist], feed_dict=train_dict)\n _test_loss,_test_pos_dist,_test_neg_dist =\\\n sess.run([loss,pos_dist,neg_dist], feed_dict=test_dict)\n\n print(\"distance:\",list(zip(_train_pos_dist.flatten(),_train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = \"No mean or variance\"\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(bn_moving_vars[0].name)\n one_moving_meaning_show = \"{}={}\".\\\n format(bn_moving_vars[0].name,np.mean(one_moving_meaning.eval()))\n\n print(one_moving_meaning_show)\n show_text = \"epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}\".format \\\n (epoch_num, iters_num + 1, _global_step, _train_loss, \"0.99\", _test_loss)\n print(show_text)\n\n if _global_step % snapshot == 0:\n # 保存PB\n constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, [\"flatten_anchor\"])\n save_model_name=model_name + \"-\" + str(_global_step) + \".pb\"\n with tf.gfile.FastGFile(pb_path + save_model_name, mode=\"wb\") as fw:\n fw.write(constant_graph.SerializeToString())\n # 保存CKPT\n ckpt_saver.save(sess, ckpt_path + model_name + \".ckpt\", global_step=total_iters_num)\n print(\"Successfully saved model {}\".format(save_model_name))\n\n\n\n\n", "import tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom net import siameseNet_batchnorm as siameseNet\nimport dataset\nimport numpy as np\nimport cv2\nimport os\nbatch_size = 64\ninput_height = 32\ninput_width = 32\ntotal_epoch_num = 50\nsnapshot = 100\nsupport_image_extensions = ['.jpg', '.png', '.jpeg', '.bmp']\nmargin = 1.0\nchannals = 3\ntrain_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/train'\ntest_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/test'\nmodel_path = 'models/'\npb_path = os.path.join(model_path, 'pb/')\nckpt_path = os.path.join(model_path, 'ckpt/')\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name = 'siamese_triplet_28out_allloss_bn'\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, _train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n", "<import token>\nbatch_size = 64\ninput_height = 32\ninput_width = 32\ntotal_epoch_num = 50\nsnapshot = 100\nsupport_image_extensions = ['.jpg', '.png', '.jpeg', '.bmp']\nmargin = 1.0\nchannals = 3\ntrain_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/train'\ntest_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/test'\nmodel_path = 'models/'\npb_path = os.path.join(model_path, 'pb/')\nckpt_path = os.path.join(model_path, 'ckpt/')\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name = 'siamese_triplet_28out_allloss_bn'\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, _train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n", "<import token>\n<assignment token>\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\n<assignment token>\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, _train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n", "<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n" ]
false