# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Validator for HWID configs."""
from typing import List
from cros.factory.hwid.service.appengine.config import CONFIG
from cros.factory.hwid.service.appengine import \
verification_payload_generator as vpg_module
from cros.factory.hwid.v3 import contents_analyzer
from cros.factory.hwid.v3 import database
ErrorCode = contents_analyzer.ErrorCode
Error = contents_analyzer.Error
class ValidationError(Exception):
"""An exception class that indicates validation failures."""
def __init__(self, errors: List[Error]):
super().__init__(str(errors))
self.errors = errors
class HwidValidator:
"""Validates HWID configs."""
def Validate(self, hwid_config_contents):
"""Validates a HWID config.
Uses strict validation (i.e. includes checksum validation).
Args:
hwid_config_contents: the current HWID config as a string.
"""
expected_checksum = database.Database.ChecksumForText(hwid_config_contents)
contents_analyzer_inst = contents_analyzer.ContentsAnalyzer(
hwid_config_contents, expected_checksum, None)
report = contents_analyzer_inst.ValidateIntegrity()
if report.errors:
raise ValidationError(report.errors)
def ValidateChange(self, hwid_config_contents, prev_hwid_config_contents):
"""Validates a HWID config change.
This method validates the current config (strict, i.e. including its
    checksum), the previous config (non-strict, i.e. no checksum validation)
and the change itself (e.g. bitfields may only be appended at the end, not
inserted in the middle).
Args:
hwid_config_contents: the current HWID config as a string.
prev_hwid_config_contents: the previous HWID config as a string.
Returns:
      A tuple (project, new_comps) where new_comps is a dict in the form of
      {category: [(cid, qid, status), ...]} which collects created/updated
      component names in the ${category}_${cid}_${qid} pattern.
"""
expected_checksum = database.Database.ChecksumForText(hwid_config_contents)
analyzer = contents_analyzer.ContentsAnalyzer(
hwid_config_contents, expected_checksum, prev_hwid_config_contents)
report_of_change = analyzer.ValidateChange()
if report_of_change.errors:
raise ValidationError(report_of_change.errors)
report_of_integrity = analyzer.ValidateIntegrity()
if report_of_integrity.errors:
raise ValidationError(report_of_integrity.errors)
db = analyzer.curr_db_instance
vpg_target = CONFIG.vpg_targets.get(db.project)
if vpg_target:
errors = vpg_module.GenerateVerificationPayload(
[(db, vpg_target.waived_comp_categories)]).error_msgs
if errors:
raise ValidationError(
[Error(ErrorCode.CONTENTS_ERROR, err) for err in errors])
return db.project, report_of_change.name_changed_components
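# Minimal usage sketch (not part of the original module; assumes valid HWID
# config strings are already in hand):
#
#   validator = HwidValidator()
#   validator.Validate(hwid_config_contents)  # raises ValidationError on failure
#   project, new_comps = validator.ValidateChange(new_contents, prev_contents)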
import re
sentence = input().lower()
word = input().lower()
# Anchor both sides with \b so the word is matched whole (e.g. "art" must not
# match inside "start"), and escape it in case it contains regex metacharacters.
pattern = rf'\b{re.escape(word)}\b'
matches = re.findall(pattern, sentence)
count = len(matches)
print(count)
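# Worked example (inputs assumed for illustration): with the two input lines
#   the cat sat on the mat
#   the
# the pattern becomes r'\bthe\b', re.findall returns two matches, and the
# script prints 2.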
#!/usr/bin/python
import os
import sys
import time
import xlwt
from lwt_testcasereader import TestCaseReader
from lwt_testhost import TestHost
from lwt_testcase import TestCase
from lwtestssh import mysshError
from lwt_config import *
CONN_RST = "Connection reset by peer"
class TestHarness:
def __init__(self, testcasesfile, hostsfile, email, suitename="LWTestReport"):
self.__testfile = testcasesfile
self.__hostsfile = hostsfile
self.__logfile = suitename + ".xls"
self.__logsep = "\n"
self.__logsepEx = "\n\n"
self.__emailaddress = email
try:
self.__testcasereader = TestCaseReader(self.__testfile)
            self.__testcasereader.CopyHeader()
self.__testcasereader.CopyField()
self.__hostread = open(self.__hostsfile, "r")
self.__lwtestreport = xlwt.Workbook(encoding="utf-8")
self.__testsummary = self.__lwtestreport.add_sheet("Test Summary")
self.__distroresult = 0
self.AddTestSummaryHeader()
self.TestMachines()
self.__lwtestreport.save(self.__logfile)
self.__hostread.close()
self.MailResults(self.__emailaddress)
except:
self.__lwtestreport.save(self.__logfile)
self.__hostread.close()
def AddTestSummaryHeader(self):
self.__testsummary.write(0, 0, "Host name")
self.__testsummary.write(0, 1, "IP")
self.__testsummary.write(0, 2, "Platform")
self.__testsummary.write(0, 3, "Distribution")
self.__testsummary.write(0, 4, "Processor Arch")
self.__testsummary.write(0, 5, "Version")
self.__testsummary.write(0, 6, "Total testcases")
self.__testsummary.write(0, 7, "Pass")
self.__testsummary.write(0, 8, "Fail")
self.__testsummary.write(0, 9, "Skip")
def TestMachines(self):
machinecount = 0
hostinfo = self.__hostread.readline()
while hostinfo != "":
try:
if not hostinfo.startswith("#"):
host = hostinfo.split(",")
if len(host) >= 3: #ip, root login username, root password
machinecount += 1
testhost = TestHost(host[0], host[1], host[2])
if testhost.rootlogin():
testhost.findplatform()
testhost.finddistribution()
testhost.findipaddress()
testhost.findhostname()
testhost.findbitversion()
self.__testsummary.write(machinecount, 0, testhost.hostname)
self.__testsummary.write(machinecount, 1, testhost.ipaddress)
self.__testsummary.write(machinecount, 2, testhost.platform)
self.__testsummary.write(machinecount, 3, testhost.distribution)
self.__testsummary.write(machinecount, 4, testhost.bitcompat)
self.__testsummary.write(machinecount, 5, testhost.distroversion)
self.__lwtestreport.save(self.__logfile)
try:
self.__distroresult = self.__lwtestreport.add_sheet(testhost.hostname)
except:
self.__distroresult = self.__lwtestreport.add_sheet(testhost.inputhostname)
self.AddTestingHeader()
Total, Pass, Fail = self.RunTestCases(testhost)
self.__testsummary.write(machinecount, 6, Total)
self.__testsummary.write(machinecount, 7, Pass)
self.__testsummary.write(machinecount, 8, Fail)
self.__testsummary.write(machinecount, 9, (Total - (Pass + Fail)))
self.__lwtestreport.save(self.__logfile)
testhost.rootlogout()
else:
testhost.__loginerror = True
self.__testsummary.write(machinecount, 0, host[0])
self.__testsummary.write(machinecount, 8, testhost.loginerrmsg)
#testhost.zipwinbindlogs()
hostinfo = self.__hostread.readline()
except:
hostinfo = self.__hostread.readline()
pass
def AddTestingHeader(self):
self.__distroresult.write(0, 0, "Testcase ID")
self.__distroresult.write(0, 1, "Description")
self.__distroresult.write(0, 2, "Test Result")
self.__distroresult.write(0, 3, "Test case")
self.__distroresult.write(0, 4, "Expected Result")
self.__distroresult.write(0, 5, "System Result")
self.__distroresult.write(0, 6, "Test case")
self.__distroresult.write(0, 7, "Expected Result")
self.__distroresult.write(0, 8, "System Result")
self.__distroresult.write(0, 9, "Test case")
self.__distroresult.write(0, 10, "Expected Result")
self.__distroresult.write(0, 11, "System Result")
self.__distroresult.write(0, 12, "Test case")
self.__distroresult.write(0, 13, "Expected Result")
self.__distroresult.write(0, 14, "System Result")
#Get all test cases from test case and execute them
def RunTestCases(self,host):
TotalTestCases = 0
PassTestCases = 0
FailTestCases = 0
index = 0
reportindex = 0
try:
maxrow = self.__testcasereader.GetRowCount()
if index == maxrow:
return TotalTestCases, PassTestCases, FailTestCases
testcase = TestCase(host)
while index < maxrow:
testcaseline = self.__testcasereader.GetTestCaseLine(index)
if not str(testcaseline[0]).startswith("#"):
testcase.executetest(testcaseline)
if testcase.result != "NOT RUN":
TotalTestCases += 1
reportindex += 1
if str(testcase.result) == "FAIL":
FailTestCases += 1
elif str(testcase.result) == "PASS":
PassTestCases += 1
self.__distroresult.write(reportindex, 0, str(testcaseline[0]))
self.__distroresult.write(reportindex, 1, testcaseline[1])
self.__distroresult.write(reportindex, 2, str(testcase.result))
column = 0
for command, expected, actual in testcase.testcaselog:
if actual.strip().lower().find(CONN_RST.strip().lower()) > -1:
if testcase.sshconn.connected:
host.rootlogout(True)
condition = True
timeout = 0
while (timeout < MAX_TIMEOUT and condition):
if host.rootlogin():
host.userloginname = host.rootloginname
host.userpass = host.rootpass
testcase.sshconn = host.rootconn
condition = False
time.sleep(2)
timeout += 2
if condition == True:
self.__distroresult.write(reportindex, column, command)
self.__distroresult.write(reportindex, column+1, expected)
                                    self.__distroresult.write(reportindex, column+2, actual.replace(" | ","\n").strip())
                                    # Reconnection failed: stop testing this host but still return the
                                    # counts, since the caller unpacks a (Total, Pass, Fail) tuple.
                                    return TotalTestCases, PassTestCases, FailTestCases
column += 3
self.__distroresult.write(reportindex, column, command)
self.__distroresult.write(reportindex, column+1, expected)
if str(testcase.result) == "PASS":
self.__distroresult.write(reportindex, column+2, expected)
else:
if actual.lower().find(expected.replace("\"","").lower().strip() ) != -1:
self.__distroresult.write(reportindex, column+2, expected)
else:
error = actual
error = "\t" + error.replace(" | ","\n").strip()
self.__distroresult.write(reportindex, column+2, error)
del testcase.testcaselog[:]
installcmd = "install"
domainjoincmd = "domainjoin"
regshellcmd = "regshellstart"
rebootcmd = "reboot"
expectedcmd = ""
error = ""
if str(testcase.result) == "FAIL":
if testcaseline[3].lower().strip().startswith(installcmd.lower().strip()):
error = "Error: Likewise product installation failed. Skipping all test cases till build uninstalls"
expectedcmd = "uninstall"
elif testcaseline[3].lower().startswith(regshellcmd.lower().strip()):
error = "Error: Failed to load regshell command tool. Skipping all regshell testcases"
expectedcmd = "regshellstop"
elif testcaseline[3].lower().strip().startswith(domainjoincmd.lower().strip()):
error = "Error: Domain join failed. Skipping all domain join test cases"
expectedcmd = "domainleave"
elif testcaseline[3].lower().strip().startswith(rebootcmd.lower().strip()):
error = "Error: System is not rebooted. Skipping all test cases "
expectedcmd = "reboot"
if expectedcmd != "":
index += 1
while index < maxrow:
testcaseline = self.__testcasereader.GetTestCaseLine(index)
if not str(testcaseline[0]).startswith("#"):
TotalTestCases += 1
if testcaseline[3].lower().strip().startswith(expectedcmd.lower().strip()):
break
index += 1
self.__distroresult.write(reportindex + 2, 0, "Error: build verification test fails")
self.__distroresult.write(reportindex + 3, 0, error)
reportindex += 3
testcaseline=""
self.__lwtestreport.save(self.__logfile)
index += 1
return TotalTestCases, PassTestCases, FailTestCases
except:
return TotalTestCases, PassTestCases, FailTestCases
    def MailResults(self, emailaddress):
        if emailaddress != "None":
            os.system("uuencode \"" + self.__logfile + "\" \"" + self.__logfile + "\" | mail -s \"Test Result\" " + emailaddress)
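# Minimal usage sketch (file names are placeholders, not from the original):
#
#   TestHarness("testcases.csv", "hosts.txt", "None", suitename="NightlyRun")
#
# reads one host per line from hosts.txt ("ip,user,password"), runs every test
# case against it, writes the results to NightlyRun.xls and, if an address is
# given instead of "None", mails the spreadsheet via uuencode/mail.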
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plot
plot.style.use('seaborn-whitegrid')
import numpy as np
import mvpissues
issues = mvpissues.issues
issues['morley'] = []
import gitlabprs
import mvpprs
import pprint
prs = mvpprs.prs
prs['morley'] = gitlabprs.prs['morley']
pp = pprint.PrettyPrinter(indent=2)
def mkFreqs(points, selector):
if len(points) == 0:
return []
freqs = {}
for x in points:
s = selector(x)
if s in freqs:
freqs[s] = freqs[s] + 1
else:
freqs[s] = 1
return sorted(freqs.items(), key=lambda kv: kv[0])
def mkAllFreqs(xs, selector):
allFreqs = {}
for points in xs:
allFreqs[points] = mkFreqs(xs[points], selector)
return allFreqs
def sortByFrequency(freqs):
return sorted(freqs, key=lambda kv: kv[1])
def unzip(xs):
ret = [[],[]]
for j in xs:
ret[0].append(j[0])
ret[1].append(j[1])
return ret
def render(x, y, xlabel, ylabel, title):
if(len(x) == 0):
return
if(len(y) == 0):
return
_fig, ax = plot.subplots()
ax.set_xticks(x)
plot.plot(x, y, 'ro')
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
plot.show()
def filterRelevantIssues(points):
return filter(lambda x: x[0] <= 3*30 and x[0] >= 2, points)
def filterRelevantPrs(points):
return filter(lambda x: x[0] <= 3*30, points)
def groupByDelta(points):
def getSndByFst(x, ys):
for y in ys:
if y[0] == x:
return y[1]
return 0
ret = []
prev = -1
for curr in [0,1,2,3,4,5,10,20,30,40,60,90]:
occurrences = 0
for j in range(prev + 1, curr + 1):
occurrences = occurrences + getSndByFst(j, points)
if occurrences > 0:
if curr == prev + 1:
ret.append([str(curr), occurrences])
else:
ret.append([str(prev + 1) + '-' + str(curr), occurrences])
prev = curr
return ret
if __name__ == '__main__':
allIssuesByDelta = mkAllFreqs(issues, lambda x: x['delta'])
allPrsByDelta = mkAllFreqs(prs, lambda x: x['delta'])
for compiler in allIssuesByDelta:
#xy = unzip(filterRelevantIssues(allIssuesByDelta[compiler]))
xy = unzip(groupByDelta(filterRelevantIssues(allIssuesByDelta[compiler])))
#render(xy[1], xy[0], 'amount of issues closed', 'days', compiler + u' issues (ν)')
#xy1 = unzip(filterRelevantPrs(allPrsByDelta[compiler]))
xy1 = unzip(groupByDelta(filterRelevantPrs(allPrsByDelta[compiler])))
render(xy1[1], xy1[0], 'amount of PRs closed', 'days', compiler + u' pull requests (ν)')
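# Example of the frequency helpers (values assumed for illustration):
#
#   mkFreqs([1, 1, 2, 5], lambda x: x)  ->  [(1, 2), (2, 1), (5, 1)]
#
# groupByDelta then buckets such (delta, count) pairs into the day ranges
# '0', '1', ..., '5', '6-10', '11-20', ... before they are plotted.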
#C:\Python27\python.exe
#!/usr/bin/env python
# encoding: utf-8
import os, csv
"""
**** WRITE RESULTS TO FILE FUNCTIONS
"""
def WriteResultsToFile(resultDict, bcDict, seqDict, workdir, indexFile, customTxt=''):
indexString = os.path.basename(indexFile).split(".")[0].split("_")[1]
csvFile = os.path.join(workdir, "{}_barcode-mutation_count_{}.csv".format(indexString, customTxt))
with open(csvFile, "wb") as handle:
fieldnames = ['Barcode', 'Mutation', 'MutationCount', 'BCSequence', 'BCSequenceCount', 'MutatedBCassociatedWith', 'MutationVariants', 'Frequency', 'LostBarcodes']
writer = csv.DictWriter(handle, fieldnames=fieldnames, delimiter='\t')
writer.writeheader()
for barcodeID in resultDict:
BCSequence = ""
BCSequenceCount = ""
BCSequence = '\n'.join(str(x[0]) for x in bcDict[barcodeID]) + "\n"
BCSequenceCount = '\n'.join(str(x[1]) for x in bcDict[barcodeID]) + "\n"
TotalBCSequence = sum([x[1] for x in bcDict[barcodeID]])
if barcodeID in seqDict[barcodeID]:
MutVarForUnqBc = '\n'.join(str(x[0]) for x in seqDict[barcodeID][barcodeID])
CountMutForUnqBc = '\n'.join(str(x[1]) for x in seqDict[barcodeID][barcodeID])
TotalMutForUncBc = len(seqDict[barcodeID][barcodeID])
else:
MutVarForUnqBc = "none"
CountMutForUnqBc = "none"
TotalMutForUncBc = 1
if len(resultDict[barcodeID]) > 2:
wtd = resultDict[barcodeID][2]
if len(wtd) != 0:
tmplist=[]
for i in wtd.values():
tmplist.extend(i)
AssociatedMutations = '\n'.join(str(x[0]) for x in tmplist)
Frequency = '\n'.join(str(x[1]) for x in tmplist)
MutatedBarcodes = ""
for bc in wtd.keys():
countMut = len(wtd[bc])
if bc == wtd.keys()[len(wtd.keys())-1]:
MutatedBarcodes += str(bc)+"\n"*(countMut-1)
else:
MutatedBarcodes += str(bc)+"\n"*countMut
else:
MutatedBarcodes = Frequency = AssociatedMutations = ""
else:
MutatedBarcodes = Frequency = AssociatedMutations = ""
writer.writerow({
'Barcode': barcodeID,
'Mutation': resultDict[barcodeID][0],
'MutationCount': resultDict[barcodeID][1],
'BCSequence': BCSequence,
'BCSequenceCount': BCSequenceCount,
'MutatedBCassociatedWith': barcodeID+"\n"*TotalMutForUncBc+MutatedBarcodes,
'MutationVariants': MutVarForUnqBc+"\n"+AssociatedMutations,
'Frequency': CountMutForUnqBc+"\n"+Frequency,
'LostBarcodes': TotalBCSequence-resultDict[barcodeID][1]})
return os.path.basename(csvFile)
def WriteBcDictToFile(bcDict, workdir, indexFile, customTxt=''):
indexString = os.path.basename(indexFile).split(".")[0].split("_")[1]
csvFile = os.path.join(workdir, "{}_barcodeDictionary{}.csv".format(indexString, customTxt))
with open(csvFile, "wb") as handle:
fieldnames = ['Barcode', 'BCSequence', 'BCSequenceCount']
writer = csv.DictWriter(handle, fieldnames=fieldnames, delimiter='\t')
writer.writeheader()
for barcodeID in bcDict:
BCSequence = ""
BCSequenceCount = ""
BCSequence = '\n'.join(str(x[0]) for x in bcDict[barcodeID]) + "\n"
BCSequenceCount = '\n'.join(str(x[1]) for x in bcDict[barcodeID]) + "\n"
writer.writerow({
'Barcode': barcodeID,
'BCSequence': BCSequence,
'BCSequenceCount': BCSequenceCount})
return os.path.basename(csvFile)
def SimpleCsvWriter(resultDict, bcDict, workdir, indexFile, customTxt=''):
indexString = os.path.basename(indexFile).split(".")[0].split("_")[1]
csvFile = os.path.join(workdir, "{}_for_R_statistics{}.csv".format(indexString, customTxt))
if resultDict is None:
resultDict = bcDict
with open(csvFile, "wb") as handle:
fieldnames = ['Barcode', 'BCSequenceCount']
writer = csv.DictWriter(handle, fieldnames=fieldnames, delimiter='\t')
writer.writeheader()
for barcodeID in resultDict:
writer.writerow({
'Barcode': barcodeID,
'BCSequenceCount': sum([x[1] for x in bcDict[barcodeID]])})
    return csvFile
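# Usage sketch (argument shapes inferred from the functions above, values
# hypothetical): resultDict maps barcode -> [mutation, count, ...] and bcDict
# maps barcode -> [(sequence, count), ...].
#
#   SimpleCsvWriter(None, bcDict, ".", "S1_idx.fastq")
#   # -> writes ./idx_for_R_statistics.csv with one total count per barcode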
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# comma_separated_dependencies() must be defined before the config dict below,
# which calls it at import time.
def comma_separated_dependencies(exclusions=('#', 'nose', 'unittest2')):
    with open('requirements.txt', 'r') as f:
        return ','.join(dep.strip() for dep in f if len(dep.strip()) > 0 and all(not dep.startswith(e) for e in exclusions))

config = {
    'description': 'Folder synchronization over (S)FTP',
    'author': '<NAME>',
    'url': 'https://github.com/marccarre/sftpsync',
    'download_url': 'https://github.com/marccarre/sftpsync',
    'author_email': '<EMAIL>',
    'version': '0.1',
    'install_requires': comma_separated_dependencies(),
    'packages': ['sftpsync'],
    'scripts': [],
    'name': 'sftpsync'
}

setup(**config)
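# Example (requirements.txt contents assumed): given the file
#   paramiko>=1.15
#   # dev tools below
#   nose
# comma_separated_dependencies() returns 'paramiko>=1.15' -- the comment line
# and the test-only package are filtered by the default exclusions.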
from google.cloud import firestore
import os
class Firestore:
def __init__(self,data:dict):
self.instance_name = data['instance_name']
self.project = data['project']
self.machine_type = data['machine_type']
self.deployment_status = data['deployment_status']
self.job_status = data['job_status']
self.instance_ip = data['instance_ip']
self.deployed_on = data['deployed_on']
self.environment = data['environment']
def update_deployment(self):
db = firestore.Client(self.project)
update_data = {
'deployment_status': self.deployment_status,
'job_status': self.job_status,
'instance_ip': self.instance_ip,
'machine_type': self.machine_type,
'deployed_on': self.deployed_on
}
fb = db.collection("my-compute-firestore-table").document(self.environment).collection("compute_engine").document(self.instance_name)
result = fb.update(update_data)
print(result)
return {'status': "firestore update successfull"}
if __name__ == "__main__":
project = os.getenv('PROJECT_ID')
instance = os.getenv('instance_name')
f = Firestore({'instance_name': instance,'machine_type':'f4-micro','instance_ip': '123','project': project,'deployment_status': 'completed','job_status': 'running','deployed_on': '123','environment': 'prod'})
    print(f.update_deployment())
import os
import sys
import pathlib
import subprocess
import pandas as pd
from datetime import datetime
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from module.neuralnetwork.MLP import MLP
from module.reader.Reader import Reader
def learn_using_single_series(mlp, no_of_last_samples, learning_data):
for i in range(no_of_last_samples, len(learning_data) + 1):
inputs = [data[0] for data in learning_data[i - no_of_last_samples:i]] \
+ [data[1] for data in learning_data[i - no_of_last_samples:i]]
outputs = [learning_data[i - 1][2], learning_data[i - 1][3]]
mlp.learn(inputs, outputs, 0.05)
def test_using_single_series(mlp, no_of_last_samples, testing_data):
mlp_output = []
for i in range(no_of_last_samples, len(testing_data) + 1):
inputs = [data[0] for data in testing_data[i - no_of_last_samples:i]] \
+ [data[1] for data in testing_data[i - no_of_last_samples:i]]
mlp_output.append(mlp.test(inputs))
return mlp_output
def calculate_error(testing_data, mlp_output):
error = []
for i in range(len(testing_data) - len(mlp_output), len(testing_data)):
testing_data_row = testing_data[i]
mlp_output_row = mlp_output[i - (len(testing_data) - len(mlp_output))]
error.append((((testing_data_row[2] - mlp_output_row[0]) ** 2) +
((testing_data_row[3] - mlp_output_row[1]) ** 2)) ** 0.5)
return error
def calculate_distribution(testing_data, mlp_output):
errors = calculate_error(testing_data, mlp_output)
distribution = []
error = 0
while len(distribution) == 0 or distribution[-1] < len(errors):
distribution.append(len(list(filter(lambda e: e < error, errors))))
error += 1
distribution = [x / len(errors) for x in distribution]
return distribution
def make_animation(testing_data, mlp_output):
plt.clf()
plt.plot([row[0] for row in testing_data], [row[1] for row in testing_data], "ro")
plt.plot([row[2] for row in testing_data], [row[3] for row in testing_data], "go")
plt.plot([row[0] for row in mlp_output], [row[1] for row in mlp_output], "bo")
plt.draw()
plt.pause(0.01)
def main() -> None:
print("Reading data...")
reader = Reader()
learning_data_series = reader.load_learning_data_series()
testing_data = reader.load_testing_data()
print("Creating list of experiments...")
experiments = []
i = 1
while i < len(sys.argv):
no_of_last_samples = int(sys.argv[i])
hidden_layers = [sys.argv[i + 2 + j] for j in range(int(sys.argv[i + 1]))]
experiments.append([no_of_last_samples] + hidden_layers)
i += len(hidden_layers) + 2
print(experiments)
experiment_number = 1
for experiment in experiments:
print("\nExperiment: " + str(experiment_number) + "/" + str(len(experiments)))
experiment_number += 1
print("\tCreating MLP...")
no_of_last_samples = experiment[0]
mlp = MLP([str(no_of_last_samples * 2)] + experiment[1:] + ['2s'])
print("\tTraining...")
same_error_counter = 0
errors = [0.0]
while same_error_counter < 10:
#learn - single epoch
for learning_data in learning_data_series:
learn_using_single_series(mlp, no_of_last_samples, learning_data)
#calculate and print error
mlp_output = test_using_single_series(mlp, no_of_last_samples, testing_data)
errors.append(sum(calculate_error(testing_data, mlp_output)))
if abs(errors[-1] - errors[-2]) < 1:
same_error_counter += 1
else:
same_error_counter = 0
print("\t\t" + str(len(errors) - 1) + ": " + str(errors[-1]))
#make animation (if there is only one experiment on the list)
#if len(experiments) == 1:
# make_animation(testing_data, mlp_output)
filename = ""
for i in experiment:
filename += str(i) + "_"
print("\tSaving graph to file...")
plt.clf()
plt.plot([row[0] for row in testing_data], [row[1] for row in testing_data], "ro")
plt.plot([row[2] for row in testing_data], [row[3] for row in testing_data], "go")
plt.plot([row[0] for row in mlp_output], [row[1] for row in mlp_output], "bo")
plt.savefig(filename + "graph.jpg")
print("\tSaving global errors from all iterations to file...")
errors_file = open(filename + "errors", "w")
errors_file.writelines([str(error) + "\n" for error in errors[1:]])
errors_file.close()
print("\tSaving neurons' weights to file...")
weights_file = open(filename + "weights", "w")
weights_file.write(mlp.weights_to_string())
weights_file.close()
print("\tCalculating cumulative distribution and saving it to file...")
denormalized_testing_data = reader.denormalize_testing_data(testing_data)
denormalized_mlp_output = reader.denormalize_mlp_output(mlp_output)
distribution_learned = calculate_distribution(denormalized_testing_data, denormalized_mlp_output)
distribution_original = calculate_distribution(denormalized_testing_data, denormalized_testing_data)
plt.clf()
plt.plot(range(len(distribution_learned)), distribution_learned, "ro")
plt.plot(range(len(distribution_original)), distribution_original, "bo")
plt.savefig(filename + "distribution_graph.jpg")
pd.DataFrame({"distribution": distribution_learned}).to_excel(filename + "distribution.xlsx", index=False, header=False)
display_finish()
# UTIL ------------------------------------------------------------------------ #
def display_finish() -> None:
print("------------------------------------------------------------------------")
print("FINISHED")
print("------------------------------------------------------------------------")
if __name__ == "__main__":
#subprocess.call(["flake8", "."])
main()
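# Example invocation (script name assumed): the arguments encode experiments as
# <no_of_last_samples> <no_of_hidden_layers> <layer_1> ... <layer_n>, e.g.
#
#   python main.py 3 2 10s 5s
#
# runs one experiment that feeds the last 3 (x, y) samples into an MLP whose
# hidden layer specs are ['10s', '5s'] (the spec strings are passed to MLP
# as-is, together with the derived input size '6' and output spec '2s').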
import inspect
from numpydoc.docscrape import ClassDoc, FunctionDoc
def numpydoc_type_desc(thing):
if inspect.isfunction(thing) or inspect.ismethod(thing):
docs = FunctionDoc(thing)
elif inspect.isclass(thing):
docs = ClassDoc(thing)
else:
raise RuntimeError("Don't know how to handle " + repr(thing))
npdoc_params = docs["Parameters"]
types = [p.type for p in npdoc_params]
descs = [" ".join(p.desc) for p in npdoc_params]
return types, descs
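# Minimal usage sketch (hypothetical example function, not part of the module):
def _example(x, y=0):
    """Add two numbers.

    Parameters
    ----------
    x : int
        First operand.
    y : int, optional
        Second operand.
    """
    return x + y

# numpydoc_type_desc(_example) should yield
# (['int', 'int, optional'], ['First operand.', 'Second operand.'])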
import json
import random
from mock import Mock
from hamcrest import (
assert_that, has_properties, has_entries,
equal_to
)
from stagecraft.apps.dashboards.models import Dashboard, Module
from stagecraft.apps.dashboards.tests.factories.factories import(
ModuleTypeFactory, DashboardFactory)
from stagecraft.apps.datasets.tests.factories import(
DataSetFactory)
from ..spreadsheets import SpreadsheetMunger
from ..import_dashboards import (dashboard_from_record,
set_dashboard_attributes,
import_dashboard,
determine_modules_for_dashboard)
with open('stagecraft/tools/fixtures/tx.json') as f:
tx_worksheet = json.loads(f.read())
with open('stagecraft/tools/fixtures/names.json') as f:
names_worksheet = json.loads(f.read())
def test_attributes_from_record():
munger = SpreadsheetMunger({
'names_transaction_name': 11,
'names_transaction_slug': 12,
'names_service_name': 9,
'names_service_slug': 10,
'names_tx_id': 19,
'names_other_notes': 17,
'names_description': 8,
})
mock_account = Mock()
mock_account.open_by_key()\
.worksheet().get_all_values.return_value = tx_worksheet
tx = munger.load_tx_worksheet(mock_account)
mock_account = Mock()
mock_account.open_by_key()\
.worksheet().get_all_values.return_value = names_worksheet
names = munger.load_names_worksheet(mock_account)
record = munger.merge(tx, names)[0]
dashboard = Dashboard()
dashboard = set_dashboard_attributes(dashboard, record, False)
assert_that(dashboard, has_properties({
'title': record['name'],
'description': record['description'],
'costs': record['costs'],
'other_notes': record['other_notes'],
'dashboard_type': 'high-volume-transaction',
'customer_type': record['customer_type'],
'business_model': record['business_model'],
'published': False
}))
def test_truncated_slug_is_replaced():
munger = SpreadsheetMunger({
'names_transaction_name': 11,
'names_transaction_slug': 12,
'names_service_name': 9,
'names_service_slug': 10,
'names_tx_id': 19,
'names_other_notes': 17,
'names_description': 8,
})
mock_account = Mock()
mock_account.open_by_key() \
.worksheet().get_all_values.return_value = tx_worksheet
tx = munger.load_tx_worksheet(mock_account)
mock_account = Mock()
mock_account.open_by_key() \
.worksheet().get_all_values.return_value = names_worksheet
names = munger.load_names_worksheet(mock_account)
record = munger.merge(tx, names)[0]
    # 10 ** 7 rather than 1e7: random.randrange rejects float arguments on
    # modern Python versions.
    truncated_slug = 'truncated-{}'.format(random.randrange(10 ** 7))
record['tx_truncated'] = truncated_slug
dashboard = DashboardFactory(slug=truncated_slug)
dashboard = dashboard_from_record(record)
assert_that(dashboard, has_properties({
'slug': record['tx_id']
}))
def test_truncated_slug_is_replaced_in_modules():
DataSetFactory(
data_group__name='transactional-services',
data_type__name='summaries')
ModuleTypeFactory(name='kpi')
ModuleTypeFactory(name='bar_chart_with_number')
munger = SpreadsheetMunger({
'names_transaction_name': 11,
'names_transaction_slug': 12,
'names_service_name': 9,
'names_service_slug': 10,
'names_tx_id': 19,
'names_other_notes': 17,
'names_description': 8,
})
mock_account = Mock()
mock_account.open_by_key() \
.worksheet().get_all_values.return_value = tx_worksheet
tx = munger.load_tx_worksheet(mock_account)
mock_account = Mock()
mock_account.open_by_key() \
.worksheet().get_all_values.return_value = names_worksheet
names = munger.load_names_worksheet(mock_account)
record = munger.merge(tx, names)[0]
    truncated_slug = 'truncated-{}'.format(random.randrange(10 ** 7))
full_tx_id = record['tx_id']
record['tx_id'] = truncated_slug
DashboardFactory(slug=truncated_slug)
summaries = [
{
'service_id': record['tx_id'],
'type': 'quarterly',
'cost_per_transaction': 0,
'digital_takeup': 0
},
{
'service_id': record['tx_id'],
'type': 'seasonally-adjusted',
'total_cost': 0
},
{
'service_id': full_tx_id,
'type': 'quarterly',
'cost_per_transaction': 0,
'digital_takeup': 0
},
{
'service_id': full_tx_id,
'type': 'seasonally-adjusted',
'total_cost': 0
}
]
import_dashboard(record, summaries, dry_run=False)
initial_modules = Module.objects.all()
service_id_filters = set(
[module.query_parameters['filter_by'][0] for
module in initial_modules])
assert_that(len(service_id_filters), equal_to(1))
assert_that(
service_id_filters.pop(),
equal_to('service_id:{}'.format(truncated_slug)))
record['tx_id'] = full_tx_id
record['tx_truncated'] = truncated_slug
import_dashboard(record, summaries, dry_run=False)
new_modules = Module.objects.all()
assert_that(len(new_modules), equal_to(len(initial_modules)))
service_id_filters = set(
[module.query_parameters['filter_by'][0] for
module in new_modules])
assert_that(len(service_id_filters), equal_to(1))
assert_that(
service_id_filters.pop(),
equal_to('service_id:{}'.format(full_tx_id)))
def test_published_unmodified():
"""
Unless publish is True, dashboard.published should
not be modified if already set.
"""
record = {
'name': '<NAME>',
'tx_id': 'test-dashboard',
'department': {
'abbr': 'DEPT',
'name': 'Dept',
'slug': 'dept'
},
'high_volume': False
}
dashboard = Dashboard()
dashboard.published = True
dashboard = set_dashboard_attributes(dashboard, record, False)
assert_that(dashboard, has_properties({
'title': record['name'],
'published': True
}))
def test_unset_published_modified():
"""
If published is not set, it should be set to False unless
the 'publish' param is True.
"""
record = {
'name': '<NAME>',
'tx_id': 'test-dashboard',
'department': {
'abbr': 'DEPT',
'name': 'Dept',
'slug': 'dept'
},
'high_volume': False
}
dashboard = Dashboard()
dashboard = set_dashboard_attributes(dashboard, record, False)
assert_that(dashboard, has_properties({
'title': record['name'],
'published': False
}))
def test_update_published():
"""
If published is set, it should be set to True if
the 'publish' param is True.
"""
record = {
'name': '<NAME>',
'tx_id': 'test-dashboard',
'department': {
'abbr': 'DEPT',
'name': 'Dept',
'slug': 'dept'
},
'high_volume': False
}
dashboard = Dashboard()
dashboard = set_dashboard_attributes(dashboard, record, True)
assert_that(dashboard, has_properties({
'title': record['name'],
'published': True
}))
def test_compulsory_modules():
"""
These modules should always be present, regardless of the data available.
"""
summaries = []
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
}))
def test_digital_takeup_present_seasonal():
summaries = [
{
'service_id': 'tx_id',
'type': 'seasonally-adjusted',
'digital_takeup': 240542,
}
]
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
'digital_takeup': True,
}))
def test_digital_takeup_present_quarterly():
summaries = [
{
'service_id': 'tx_id',
'type': 'quarterly',
'digital_takeup': 240542,
}
]
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
'digital_takeup': True,
}))
def test_total_cost_present():
summaries = [
{
'service_id': 'tx_id',
'type': 'seasonally-adjusted',
'total_cost': 240542,
}
]
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
'total_cost': True,
}))
def test_cost_per_transaction_present():
summaries = [
{
'service_id': 'tx_id',
'type': 'seasonally-adjusted',
'cost_per_transaction': 240542,
}
]
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
'cost_per_transaction': True,
}))
def test_digital_takeup_present_seasonal_when_0():
summaries = [
{
'service_id': 'tx_id',
'type': 'seasonally-adjusted',
'digital_takeup': 0,
}
]
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
'digital_takeup': True,
}))
def test_digital_takeup_present_quarterly_when_0():
summaries = [
{
'service_id': 'tx_id',
'type': 'quarterly',
'digital_takeup': 0,
}
]
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
'digital_takeup': True,
}))
def test_total_cost_present_when_0():
summaries = [
{
'service_id': 'tx_id',
'type': 'seasonally-adjusted',
'total_cost': 0,
}
]
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
'total_cost': True,
}))
def test_cost_per_transaction_present_when_0():
summaries = [
{
'service_id': 'tx_id',
'type': 'seasonally-adjusted',
'cost_per_transaction': 0,
}
]
module_types = determine_modules_for_dashboard(summaries, 'tx_id')
assert_that(module_types, has_entries({
'transactions_per_year': True,
'transactions_per_quarter': True,
'cost_per_transaction': True,
}))
"""Tests for seq_io module.
"""
from collections import OrderedDict
import logging
import tempfile
from os import unlink
import unittest
from adapt.utils import seq_io
__author__ = '<NAME> <<EMAIL>>'
class TestFastaRead(unittest.TestCase):
"""Tests reading a fasta file.
"""
def setUp(self):
# Disable logging
logging.disable(logging.INFO)
# Write the temporary fasta file
self.fasta = tempfile.NamedTemporaryFile(mode='w', delete=False)
self.fasta.write(">genome_1\n")
self.fasta.write("ATACG\n")
self.fasta.write("TATGC\n")
self.fasta.write(">genome_2\n")
self.fasta.write("ATCG\n")
self.fasta.write("TT\n")
self.fasta.write("Gg\n")
self.fasta.write("\n")
self.fasta.write(">genome_3\n")
self.fasta.write("AAA\n")
self.fasta.write("CCC\n")
self.fasta.write("\n")
self.fasta.write("\n")
self.fasta.write(">genome_4\n")
self.fasta.write("AtA\n")
self.fasta.write("CGC\n")
self.fasta.write("\n")
self.fasta.write("\n")
self.fasta.write("\n")
self.fasta.write(">genome_5\n")
self.fasta.write("AGGA\n")
self.fasta.write("CAAT\n")
self.fasta.write("\n")
self.fasta.write("\n")
# Closes the file so that it can be reopened on Windows
self.fasta.close()
self.expected = OrderedDict()
self.expected["genome_1"] = "ATACGTATGC"
self.expected["genome_2"] = "ATCGTTGG"
self.expected["genome_3"] = "AAACCC"
self.expected["genome_4"] = "ATACGC"
self.expected["genome_5"] = "AGGACAAT"
def test_read(self):
seqs = seq_io.read_fasta(self.fasta.name, make_uppercase=True)
self.assertEqual(seqs, self.expected)
def tearDown(self):
# Delete temporary file
unlink(self.fasta.name)
# Re-enable logging
logging.disable(logging.NOTSET)
class TestFastaWrite(unittest.TestCase):
"""Tests writing and reading a fasta file.
"""
def setUp(self):
# Disable logging
logging.disable(logging.INFO)
# Create a temporary fasta file
self.fasta = tempfile.NamedTemporaryFile(mode='w', delete=False)
# Closes the file so that it can be reopened on Windows
self.fasta.close()
self.seqs = OrderedDict()
self.seqs["genome_1"] = "ATACGTATGC"
self.seqs["genome_2"] = "ATCGTTGG"
self.seqs["genome_3"] = "AAACCC"
self.seqs["genome_4"] = "ATACGC"
self.seqs["genome_5"] = "AGGACAAT"
def test_write_and_read(self):
seq_io.write_fasta(self.seqs, self.fasta.name)
seqs_read = seq_io.read_fasta(self.fasta.name, make_uppercase=True)
self.assertEqual(self.seqs, seqs_read)
def tearDown(self):
# Delete temporary file
unlink(self.fasta.name)
# Re-enable logging
logging.disable(logging.NOTSET)
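# Not part of the original file: a standard guard so the tests can also be run
# directly with `python <this file>` (they are normally collected by a runner).
if __name__ == '__main__':
    unittest.main()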
from setuptools import setup, find_packages
import tensorhive
import os
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='tensorhive',
version=tensorhive.__version__,
license='Apache License 2.0',
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'tensorhive = tensorhive.__main__:main'
],
},
description='A user-friendly GPU management tool for distributed machine learning workloads',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
url='https://github.com/roscisz/TensorHive',
download_url='https://github.com/roscisz/TensorHive/archive/{}.tar.gz'.format(tensorhive.__version__),
keywords='gpu reservation calendar monitoring machine learning distributed tensorflow pytorch',
install_requires=[
'alembic==1.0.3',
'bcrypt==3.1.7',
'certifi==2020.12.5',
'cffi==1.14.5',
'chardet==4.0.0',
'Click==7.0',
'clickclick==20.10.2',
'coloredlogs==10.0',
'connexion==2.3.0',
'cryptography==3.2.1',
'Flask==1.1.4',
'Flask-Cors==3.0.7',
'Flask-JWT-Extended==3.13.1',
'gevent==21.1.2',
'greenlet==1.1.0',
'gunicorn==19.9.0',
'humanfriendly==9.1',
'idna==2.10',
'inflection==0.5.1',
'itsdangerous==1.1.0',
'Jinja2==2.11.3',
'jsonschema==2.6.0',
'Mako==1.1.4',
'MarkupSafe==1.1.1',
'openapi-spec-validator==0.2.9',
'paramiko==2.7.2',
'parallel-ssh==1.9.1',
'passlib==1.7.1',
'pycparser==2.20',
'PyJWT==1.7.1',
'PyNaCl==1.4.0',
'python-dateutil==2.8.1',
'python-editor==1.0.4',
'python-usernames==0.2.3',
'PyYAML==5.3.1',
'requests==2.25.1',
'Safe==0.4',
'six==1.16.0',
'SQLAlchemy==1.3.0',
'SQLAlchemy-Utils==0.33.8',
'ssh2-python==0.26.0',
'stringcase==1.2.0',
'swagger_ui_bundle==0.0.8',
'urllib3==1.26.4',
'Werkzeug==0.16.1',
'zope.event==4.5.0',
'zope.interface==5.4.0',
],
zip_safe=False
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((106, 131), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'import os\n'), ((143, 184), 'os.path.join', 'os.path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (155, 184), False, 'import os\n'), ((357, 372), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (370, 372), False, 'from setuptools import setup, find_packages\n')] |
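How the console_scripts entry point above resolves at run time: once the package is installed, the `tensorhive` executable is a thin wrapper around tensorhive.__main__:main, so the sketch below is equivalent to running it from a shell (assumes the package exposes main() as declared):
from tensorhive.__main__ import main

if __name__ == '__main__':
    main()  # same as invoking `tensorhive` on the command line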
import copy
import functools
import logging
import os
from distutils.util import strtobool
from typing import Any, Callable, Dict
from aws_xray_sdk.core import models, patch_all, xray_recorder
is_cold_start = True
logger = logging.getLogger(__name__)
class Tracer:
"""Tracer using AWS-XRay to provide decorators with known defaults for Lambda functions
When running locally, it detects whether it's running via SAM CLI,
and if it is it returns dummy segments/subsegments instead.
By default, it patches all available libraries supported by X-Ray SDK. Patching is
automatically disabled when running locally via SAM CLI or by any other means. \n
Ref: https://docs.aws.amazon.com/xray-sdk-for-python/latest/reference/thirdparty.html
Tracer keeps a copy of its configuration as it can be instantiated more than once. This
is useful when you are using your own middlewares and want to utilize an existing Tracer.
Make sure to set `auto_patch=False` in subsequent Tracer instances to avoid double patching.
Environment variables
---------------------
POWERTOOLS_TRACE_DISABLED : str
disable tracer (e.g. `"true", "True", "TRUE"`)
POWERTOOLS_SERVICE_NAME : str
service name
Parameters
----------
service: str
Service name that will be appended in all tracing metadata
auto_patch: bool
Patch existing imported modules during initialization, by default True
disabled: bool
Flag to explicitly disable tracing, useful when running/testing locally.
`Env POWERTOOLS_TRACE_DISABLED="true"`
Example
-------
**A Lambda function using Tracer**
from aws_lambda_powertools.tracing import Tracer
tracer = Tracer(service="greeting")
@tracer.capture_method
def greeting(name: str) -> Dict:
return {
"name": name
}
@tracer.capture_lambda_handler
def handler(event: dict, context: Any) -> Dict:
print("Received event from Lambda...")
response = greeting(name="Heitor")
return response
**Booking Lambda function using Tracer that adds additional annotation/metadata**
from aws_lambda_powertools.tracing import Tracer
tracer = Tracer(service="booking")
@tracer.capture_method
def confirm_booking(booking_id: str) -> Dict:
resp = add_confirmation(booking_id)
tracer.put_annotation("BookingConfirmation", resp['requestId'])
tracer.put_metadata("Booking confirmation", resp)
return resp
@tracer.capture_lambda_handler
def handler(event: dict, context: Any) -> Dict:
print("Received event from Lambda...")
response = greeting(name="Heitor")
return response
**A Lambda function using service name via POWERTOOLS_SERVICE_NAME**
export POWERTOOLS_SERVICE_NAME="booking"
from aws_lambda_powertools.tracing import Tracer
tracer = Tracer()
@tracer.capture_lambda_handler
def handler(event: dict, context: Any) -> Dict:
print("Received event from Lambda...")
response = greeting(name="Lessa")
return response
**Reuse an existing instance of Tracer anywhere in the code**
# lambda_handler.py
from aws_lambda_powertools.tracing import Tracer
tracer = Tracer()
@tracer.capture_lambda_handler
def handler(event: dict, context: Any) -> Dict:
...
# utils.py
from aws_lambda_powertools.tracing import Tracer
tracer = Tracer()
...
Returns
-------
Tracer
Tracer instance with imported modules patched
Limitations
-----------
* Async handler and methods not supported
"""
_default_config = {"service": "service_undefined", "disabled": False, "provider": xray_recorder, "auto_patch": True}
_config = copy.copy(_default_config)
def __init__(
self, service: str = None, disabled: bool = None, provider: xray_recorder = None, auto_patch: bool = None
):
self.__build_config(service=service, disabled=disabled, provider=provider, auto_patch=auto_patch)
self.provider = self._config["provider"]
self.disabled = self._config["disabled"]
self.service = self._config["service"]
self.auto_patch = self._config["auto_patch"]
if self.disabled:
self.__disable_tracing_provider()
if self.auto_patch:
self.patch()
def capture_lambda_handler(self, lambda_handler: Callable[[Dict, Any], Any] = None):
"""Decorator to create subsegment for lambda handlers
As Lambda follows the (event, context) signature we can remove some of the boilerplate
and also capture, as metadata, the handler's response or any exception it throws
Example
-------
**Lambda function using capture_lambda_handler decorator**
tracer = Tracer(service="payment")
@tracer.capture_lambda_handler
def handler(event, context):
Parameters
----------
method : Callable
Method to annotate on
Raises
------
err
Exception raised by method
"""
@functools.wraps(lambda_handler)
def decorate(event, context):
self.create_subsegment(name=f"## {lambda_handler.__name__}")
try:
logger.debug("Calling lambda handler")
response = lambda_handler(event, context)
logger.debug("Received lambda handler response successfully")
logger.debug(response)
if response:
self.put_metadata("lambda handler response", response)
except Exception as err:
logger.exception("Exception received from lambda handler", exc_info=True)
self.put_metadata(f"{self.service}_error", err)
raise
finally:
self.end_subsegment()
return response
return decorate
def capture_method(self, method: Callable = None):
"""Decorator to create subsegment for arbitrary functions
It also captures both response and exceptions as metadata
and creates a subsegment named `## <method_name>`
Example
-------
**Custom function using capture_method decorator**
tracer = Tracer(service="payment")
@tracer.capture_method
def some_function():
Parameters
----------
method : Callable
Method to annotate on
Raises
------
err
Exception raised by method
"""
@functools.wraps(method)
def decorate(*args, **kwargs):
method_name = f"{method.__name__}"
self.create_subsegment(name=f"## {method_name}")
try:
logger.debug(f"Calling method: {method_name}")
response = method(*args, **kwargs)
logger.debug(f"Received {method_name} response successfully")
logger.debug(response)
if response is not None:
self.put_metadata(f"{method_name} response", response)
except Exception as err:
logger.exception(f"Exception received from '{method_name}'' method", exc_info=True)
self.put_metadata(f"{method_name} error", err)
raise
finally:
self.end_subsegment()
return response
return decorate
def put_annotation(self, key: str, value: Any):
"""Adds annotation to existing segment or subsegment
Example
-------
Custom annotation for a pseudo service named payment
tracer = Tracer(service="payment")
tracer.put_annotation("PaymentStatus", "CONFIRMED")
Parameters
----------
key : str
Annotation key (e.g. PaymentStatus)
value : Any
Value for annotation (e.g. "CONFIRMED")
"""
# Will no longer be needed once #155 is resolved
# https://github.com/aws/aws-xray-sdk-python/issues/155
if self.disabled:
return
logger.debug(f"Annotating on key '{key}'' with '{value}''")
self.provider.put_annotation(key=key, value=value)
def put_metadata(self, key: str, value: object, namespace: str = None):
"""Adds metadata to existing segment or subsegment
Parameters
----------
key : str
Metadata key
value : object
Value for metadata
namespace : str, optional
Namespace that metadata will lie under, by default None
Example
-------
Custom metadata for a pseudo service named payment
tracer = Tracer(service="payment")
response = collect_payment()
tracer.put_metadata("Payment collection", response)
"""
# Will no longer be needed once #155 is resolved
# https://github.com/aws/aws-xray-sdk-python/issues/155
if self.disabled:
return
_namespace = namespace or self.service
logger.debug(f"Adding metadata on key '{key}'' with '{value}'' at namespace '{namespace}''")
self.provider.put_metadata(key=key, value=value, namespace=_namespace)
def create_subsegment(self, name: str) -> models.subsegment:
"""Creates subsegment or a dummy segment plus subsegment if tracing is disabled
It also assumes Tracer would be instantiated statically so that cold starts are captured.
Parameters
----------
name : str
Subsegment name
Example
-------
Creates a genuine subsegment
self.create_subsegment(name="a meaningful name")
Returns
-------
models.subsegment
AWS X-Ray Subsegment
"""
# Will no longer be needed once #155 is resolved
# https://github.com/aws/aws-xray-sdk-python/issues/155
subsegment = None
if self.disabled:
logger.debug("Tracing has been disabled, return dummy subsegment instead")
segment = models.dummy_entities.DummySegment()
subsegment = models.dummy_entities.DummySubsegment(segment)
else:
subsegment = self.provider.begin_subsegment(name=name)
global is_cold_start
if is_cold_start:
logger.debug("Annotating cold start")
subsegment.put_annotation("ColdStart", True)
is_cold_start = False
return subsegment
def end_subsegment(self):
"""Ends an existing subsegment
Parameters
----------
subsegment : models.subsegment
Subsegment previously created
"""
if self.disabled:
logger.debug("Tracing has been disabled, return instead")
return
self.provider.end_subsegment()
def patch(self):
"""Patch modules for instrumentation"""
logger.debug("Patching modules...")
if self.disabled:
logger.debug("Tracing has been disabled, aborting patch")
return
patch_all() # pragma: no cover
def __disable_tracing_provider(self):
"""Forcefully disables tracing and patching"""
from aws_xray_sdk import global_sdk_config
global_sdk_config.set_sdk_enabled(False)
def __is_trace_disabled(self) -> bool:
"""Detects whether trace has been disabled
Tracing is automatically disabled in the following conditions:
1. Explicitly disabled via `TRACE_DISABLED` environment variable
2. Running in Lambda Emulators, or locally where X-Ray Daemon will not be listening
3. Explicitly disabled via constructor e.g `Tracer(disabled=True)`
Returns
-------
bool
"""
logger.debug("Verifying whether Tracing has been disabled")
is_lambda_sam_cli = os.getenv("AWS_SAM_LOCAL")
env_option = str(os.getenv("POWERTOOLS_TRACE_DISABLED", "false"))
disabled_env = strtobool(env_option)
if disabled_env:
logger.debug("Tracing has been disabled via env var POWERTOOLS_TRACE_DISABLED")
return disabled_env
if is_lambda_sam_cli:
logger.debug("Running under SAM CLI env or not in Lambda env; disabling Tracing")
return True
return False
def __build_config(
self, service: str = None, disabled: bool = None, provider: xray_recorder = None, auto_patch: bool = None
):
""" Populates Tracer config for new and existing initializations """
is_disabled = disabled if disabled is not None else self.__is_trace_disabled()
is_service = service if service is not None else os.getenv("POWERTOOLS_SERVICE_NAME")
self._config["provider"] = provider if provider is not None else self._config["provider"]
self._config["auto_patch"] = auto_patch if auto_patch is not None else self._config["auto_patch"]
self._config["service"] = is_service if is_service else self._config["service"]
self._config["disabled"] = is_disabled if is_disabled else self._config["disabled"]
@classmethod
def _reset_config(cls):
cls._config = copy.copy(cls._default_config)
| [
"logging.getLogger",
"distutils.util.strtobool",
"aws_xray_sdk.core.patch_all",
"os.getenv",
"aws_xray_sdk.core.models.dummy_entities.DummySegment",
"functools.wraps",
"aws_xray_sdk.global_sdk_config.set_sdk_enabled",
"copy.copy",
"aws_xray_sdk.core.models.dummy_entities.DummySubsegment"
] | [((225, 252), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (242, 252), False, 'import logging\n'), ((4003, 4029), 'copy.copy', 'copy.copy', (['_default_config'], {}), '(_default_config)\n', (4012, 4029), False, 'import copy\n'), ((5373, 5404), 'functools.wraps', 'functools.wraps', (['lambda_handler'], {}), '(lambda_handler)\n', (5388, 5404), False, 'import functools\n'), ((6851, 6874), 'functools.wraps', 'functools.wraps', (['method'], {}), '(method)\n', (6866, 6874), False, 'import functools\n'), ((11437, 11448), 'aws_xray_sdk.core.patch_all', 'patch_all', ([], {}), '()\n', (11446, 11448), False, 'from aws_xray_sdk.core import models, patch_all, xray_recorder\n'), ((11627, 11667), 'aws_xray_sdk.global_sdk_config.set_sdk_enabled', 'global_sdk_config.set_sdk_enabled', (['(False)'], {}), '(False)\n', (11660, 11667), False, 'from aws_xray_sdk import global_sdk_config\n'), ((12230, 12256), 'os.getenv', 'os.getenv', (['"""AWS_SAM_LOCAL"""'], {}), "('AWS_SAM_LOCAL')\n", (12239, 12256), False, 'import os\n'), ((12354, 12375), 'distutils.util.strtobool', 'strtobool', (['env_option'], {}), '(env_option)\n', (12363, 12375), False, 'from distutils.util import strtobool\n'), ((13554, 13584), 'copy.copy', 'copy.copy', (['cls._default_config'], {}), '(cls._default_config)\n', (13563, 13584), False, 'import copy\n'), ((10408, 10444), 'aws_xray_sdk.core.models.dummy_entities.DummySegment', 'models.dummy_entities.DummySegment', ([], {}), '()\n', (10442, 10444), False, 'from aws_xray_sdk.core import models, patch_all, xray_recorder\n'), ((10470, 10516), 'aws_xray_sdk.core.models.dummy_entities.DummySubsegment', 'models.dummy_entities.DummySubsegment', (['segment'], {}), '(segment)\n', (10507, 10516), False, 'from aws_xray_sdk.core import models, patch_all, xray_recorder\n'), ((12282, 12329), 'os.getenv', 'os.getenv', (['"""POWERTOOLS_TRACE_DISABLED"""', '"""false"""'], {}), "('POWERTOOLS_TRACE_DISABLED', 'false')\n", (12291, 12329), False, 'import os\n'), ((13064, 13100), 'os.getenv', 'os.getenv', (['"""POWERTOOLS_SERVICE_NAME"""'], {}), "('POWERTOOLS_SERVICE_NAME')\n", (13073, 13100), False, 'import os\n')] |
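A minimal local sketch of the Tracer above with tracing forced off, so neither an X-Ray daemon nor a Lambda environment is needed (assumes the Tracer class above and its aws_xray_sdk dependency are importable; collect_payment and the event shape are illustrative):
import os

os.environ["POWERTOOLS_TRACE_DISABLED"] = "true"  # same effect as Tracer(disabled=True)
tracer = Tracer(service="payment")

@tracer.capture_method
def collect_payment(charge_id: str) -> dict:
    # response would be recorded as subsegment metadata when tracing is enabled
    return {"charge_id": charge_id, "status": "CONFIRMED"}

@tracer.capture_lambda_handler
def handler(event, context):
    resp = collect_payment(event["charge_id"])
    tracer.put_annotation("PaymentStatus", resp["status"])  # no-op while disabled
    return resp

print(handler({"charge_id": "ch_123"}, None))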
import pacmap
import numpy as np
import matplotlib.pyplot as plt
# loading preprocessed coil_20 dataset
# you can replace it with any dataset in ndarray format with shape (N, D),
# where N is the number of samples and D is the dimension of each sample
X = np.load("../data/coil_20.npy", allow_pickle=True)
X = X.reshape(X.shape[0], -1)
y = np.load("./data/coil_20_labels.npy", allow_pickle=True)
# Initialize the pacmap instance
# Setting n_neighbors to "None" leads to an automatic parameter selection
# choice shown in "parameter" section of the README file.
# Notice that from v0.6.0 on, we rename the n_dims parameter to n_components.
embedding = pacmap.PaCMAP(n_components=2, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0)
# fit the data (The index of transformed data corresponds to the index of the original data)
X_transformed = embedding.fit_transform(X, init="pca")
# visualize the embedding
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.scatter(X_transformed[:, 0], X_transformed[:, 1], cmap="Spectral", c=y, s=0.6)
| [
"numpy.load",
"matplotlib.pyplot.subplots",
"pacmap.PaCMAP"
] | [((272, 321), 'numpy.load', 'np.load', (['"""../data/coil_20.npy"""'], {'allow_pickle': '(True)'}), "('../data/coil_20.npy', allow_pickle=True)\n", (279, 321), True, 'import numpy as np\n'), ((356, 411), 'numpy.load', 'np.load', (['"""./data/coil_20_labels.npy"""'], {'allow_pickle': '(True)'}), "('./data/coil_20_labels.npy', allow_pickle=True)\n", (363, 411), True, 'import numpy as np\n'), ((669, 744), 'pacmap.PaCMAP', 'pacmap.PaCMAP', ([], {'n_components': '(2)', 'n_neighbors': 'None', 'MN_ratio': '(0.5)', 'FP_ratio': '(2.0)'}), '(n_components=2, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0)\n', (682, 744), False, 'import pacmap\n'), ((932, 966), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6, 6)'}), '(1, 1, figsize=(6, 6))\n', (944, 966), True, 'import matplotlib.pyplot as plt\n')] |
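Optional follow-up to the scatter plot above: label the axes and persist the figure, using only the fig/ax objects already created (the output filename is illustrative):
ax.set_xlabel("PaCMAP dimension 1")
ax.set_ylabel("PaCMAP dimension 2")
ax.set_title("COIL-20 embedded with PaCMAP")
fig.savefig("coil_20_pacmap.png", dpi=300, bbox_inches="tight")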
from django.db import models
# Create your models here.
class Country(models.Model):
country_name = models.CharField(max_length=50, blank=False)
country_short_name = models.CharField(max_length=4, blank=False)
country_img_url = models.CharField(max_length=120, blank=False)
def __str__(self):
return self.country_short_name
class Sex(models.Model):
sex_shortcut = models.CharField(max_length=4, blank=False)
sex_full = models.CharField(max_length=40, blank=False)
def __str__(self):
return self.sex_shortcut
class UserModel(models.Model):
username = models.CharField(max_length=30, blank=False)
email = models.EmailField(max_length=100)
password = models.CharField(max_length=100, blank=False)
name = models.CharField(max_length=30, blank=False)
secondname = models.CharField(max_length=30, blank=False)
surename = models.CharField(max_length=50, blank=False)
country = models.ForeignKey(Country, related_name='country', on_delete=models.CASCADE)
sex = models.ForeignKey(Sex, related_name='sex', on_delete=models.CASCADE)
street = models.CharField(max_length=50, blank=False)
postal_code = models.CharField(max_length=15, blank=False)
birth_date = models.DateField()
def __str__(self):
return self.username
| [
"django.db.models.EmailField",
"django.db.models.DateField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((105, 149), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(False)'}), '(max_length=50, blank=False)\n', (121, 149), False, 'from django.db import models\n'), ((175, 218), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'blank': '(False)'}), '(max_length=4, blank=False)\n', (191, 218), False, 'from django.db import models\n'), ((241, 286), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'blank': '(False)'}), '(max_length=120, blank=False)\n', (257, 286), False, 'from django.db import models\n'), ((395, 438), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'blank': '(False)'}), '(max_length=4, blank=False)\n', (411, 438), False, 'from django.db import models\n'), ((454, 498), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(False)'}), '(max_length=40, blank=False)\n', (470, 498), False, 'from django.db import models\n'), ((604, 648), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(False)'}), '(max_length=30, blank=False)\n', (620, 648), False, 'from django.db import models\n'), ((661, 694), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (678, 694), False, 'from django.db import models\n'), ((710, 755), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (726, 755), False, 'from django.db import models\n'), ((767, 811), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(False)'}), '(max_length=30, blank=False)\n', (783, 811), False, 'from django.db import models\n'), ((829, 873), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(False)'}), '(max_length=30, blank=False)\n', (845, 873), False, 'from django.db import models\n'), ((889, 933), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(False)'}), '(max_length=50, blank=False)\n', (905, 933), False, 'from django.db import models\n'), ((948, 1024), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Country'], {'related_name': '"""country"""', 'on_delete': 'models.CASCADE'}), "(Country, related_name='country', on_delete=models.CASCADE)\n", (965, 1024), False, 'from django.db import models\n'), ((1035, 1103), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Sex'], {'related_name': '"""sex"""', 'on_delete': 'models.CASCADE'}), "(Sex, related_name='sex', on_delete=models.CASCADE)\n", (1052, 1103), False, 'from django.db import models\n'), ((1117, 1161), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(False)'}), '(max_length=50, blank=False)\n', (1133, 1161), False, 'from django.db import models\n'), ((1180, 1224), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'blank': '(False)'}), '(max_length=15, blank=False)\n', (1196, 1224), False, 'from django.db import models\n'), ((1242, 1260), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1258, 1260), False, 'from django.db import models\n')] |
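A minimal usage sketch for the models above, e.g. from `manage.py shell` or a test (assumes migrations have been applied; all field values are illustrative):
import datetime

poland = Country.objects.create(country_name="Poland", country_short_name="PL",
                                country_img_url="https://example.com/flags/pl.png")
male = Sex.objects.create(sex_shortcut="M", sex_full="Male")
user = UserModel.objects.create(
    username="jkowalski", email="jan@example.com", password="not-a-real-hash",
    name="Jan", secondname="Maria", surename="Kowalski",
    country=poland, sex=male, street="Polna 1", postal_code="00-001",
    birth_date=datetime.date(1990, 5, 17))
print(user, user.country, user.sex)  # jkowalski PL M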
from __future__ import print_function
from random import randint
import numpy as np
from Candy import *
from Move import Move
from uncertainty_exception import UncertaintyException
class Board:
DEFAULT_NUM_OF_CANDIES = 6
DEFAULT_HEIGHT = 9
DEFAULT_WIDTH = 9
DEFAULT_STRIKE = 3
NO_SCORE = 0
STRIPED, CHOCOLATE = 4, 5
NONE_MOVE = Move((-1, -1), (-1, -2), True)
def __init__(self, num_of_candies=DEFAULT_NUM_OF_CANDIES, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH,
board_to_copy=None):
"""
this function creates a new game board
"""
self.num_of_candies = num_of_candies
self.score = 0
self.board = None
self.exploded_counter = 0
self.striped_counter = 0
self.wrapped_counter = 0
self.chocolate_counter = 0
self.unknown_prec = 0
if board_to_copy is not None and board_to_copy.any() and len(board_to_copy.shape) == 2 and \
board_to_copy.shape[0] * board_to_copy.shape[1] != 0:
self.height = board_to_copy.shape[0]
self.width = board_to_copy.shape[1]
self.interpret_board(board_to_copy)
else:
self.height = height
self.width = width
self.initialize_board()
@staticmethod
def access_key(x, dictionary):
for key in dictionary.keys():
if type(key) == int:
if x == key:
return key
elif x in key:
return key
@staticmethod
def get_candy(candy_number, location):
translation_dict = {(0, 2, 4, 6, 8, 10): Candy, (1, 3, 5, 7, 9, 11): HorizontalStriped,
(13, 14, 15, 16, 17, 18): VerticalStriped, 12: Chocolate,
(19, 20, 21, 22, 23, 24): Wrapped, -1: 'special'}
translation_color = {(0, 1, 13, 19): 0, (2, 3, 14, 20): 1, (4, 5, 15, 21): 2, (6, 7, 18, 22): 3,
(8, 9, 16, 23): 4, (10, 11, 17, 24): 5, 12: 6}
key = Board.access_key(candy_number, translation_dict)
color_key = Board.access_key(candy_number, translation_color)
return translation_dict[key](color=translation_color[color_key], location=location)
def interpret_board(self, numbers_board):
# initialize the new board
board = np.ndarray(numbers_board.shape, dtype=object)
self.height, self.width = board.shape
for row in range(numbers_board.shape[0]):
for col in range(numbers_board.shape[1]):
board[row, col] = Board.get_candy(numbers_board[row, col], (row, col))
self.board = board
def initialize_board(self):
"""
this function initializes a new board with random candies.
"""
self.unknown_prec = 0
self.exploded_counter = 0
new_board = np.zeros(shape=(self.height, self.width), dtype=object)
for row in range(self.height):
for col in range(self.width):
new_board[row][col] = Candy(randint(0, self.num_of_candies - 1),
(row, col)) # in randint the edges are inclusive
self.board = new_board
def is_empty(self, location):
return self.board[location].empty or isinstance(self.board[location], UnknownCandy)
def check_row_matches(self, row, strike_length=DEFAULT_STRIKE):
"""
:param row: the row you want to check
:param strike_length: the minimum streak length to report
:return: a list of (start, end) tuples describing the strikes of length `strike_length` (default 3) or more in the row
"""
length, strike_start, strike_end = 1, 0, 0
list_of_matches = []
for col in range(self.width):
if col == self.width - 1 or self.board[row, col].color != self.board[row, col + 1].color or self.is_empty(
(row, col + 1)) or self.is_empty((row, col)):
if length >= strike_length:
list_of_matches.append((strike_start, strike_end))
length = 1
strike_start = col + 1
else:
length += 1
strike_end = col + 1
return list_of_matches
def check_col_matches(self, col, strike_length=DEFAULT_STRIKE):
"""
:param col: the col you want to check
:param strike_length: the minimum streak length to report
:return: a list of (start, end) tuples describing the strikes of length `strike_length` (default 3) or more in the col
"""
length, strike_start, strike_end = 1, 0, 0
list_of_matches = []
for row in range(self.height):
if row == self.height - 1 or self.board[row][col].color != self.board[row + 1][col].color or self.is_empty(
(row + 1, col)) or self.is_empty((row, col)):
if length >= strike_length:
list_of_matches.append((strike_start, strike_end))
length = 1
strike_start = row + 1
else:
length += 1
strike_end = row + 1
return list_of_matches
def mark_candies_to_explode(self, last_move=NONE_MOVE):
"""
this function marks all the candies on the board that need to be exploded and also creates all the special
candies on the board
:param last_move: the last move
:return: the score the initial marking accords to, plus the counts of each type of special candy created
"""
score = Board.NO_SCORE
##############
# ROW CHECK
##############
if last_move != Board.NONE_MOVE:
if isinstance(self.board[last_move.start], Special):
score += self.board[last_move.start].swipe_explosion(self.board, last_move.end)
if isinstance(self.board[last_move.end], Special):
score += self.board[last_move.end].swipe_explosion(self.board, last_move.start)
for row in range(self.height):
for tuple_indices in self.check_row_matches(row):
length = tuple_indices[1] - tuple_indices[0] + 1
for col_index in range(length):
self.board[row][col_index + tuple_indices[0]].mark = True
if length == Board.STRIPED: # stripe
self.striped_counter += 1
score += 60
# TODO: estimate the score this candy will yield
if row == last_move.start[0] or row == last_move.end[0]:
if tuple_indices[1] >= last_move.start[1] >= tuple_indices[0]:
self.board[row][last_move.start[1]] = VerticalStriped(
self.board[row][last_move.start[1]].color, (row, last_move.start[1])) # ""
elif tuple_indices[1] >= last_move.end[1] >= tuple_indices[
0]: # the last move caused this streak
self.board[row][last_move.end[1]] = Striped(self.board[row][last_move.end[1]].color,
(row, last_move.end[1]),
DIRECTIONS[VERT])
elif length >= Board.CHOCOLATE: # color bomb
self.chocolate_counter += 1
# this is a five streak - each candy awards 40 points instead of 20
score += 120
if row == last_move.start[0] or row == last_move.end[0]:
if tuple_indices[1] >= last_move.start[1] >= tuple_indices[0]:
self.board[row][last_move.start[1]] = Chocolate((row, last_move.start[1])) # ""
elif tuple_indices[1] >= last_move.end[1] >= tuple_indices[
0]: # the last move caused this strike
self.board[row][last_move.start[1]] = Chocolate((row, last_move.end[1]))
##############
# COLUMN CHECK
##############
for col in range(self.width):
for tuple_indices in self.check_col_matches(col):
length = tuple_indices[1] - tuple_indices[0] + 1
for row_index in range(length):
candy = self.board[row_index + tuple_indices[0], col]
# check for wrap
if (candy.mark and not isinstance(candy, Wrapped)) or (
isinstance(candy, Striped) and not candy.mark and 0 < col < self.width - 1 and self.board[
row_index + tuple_indices[0], col - 1].mark and self.board[
row_index + tuple_indices[0], col - 1].color == candy.color and self.board[
row_index + tuple_indices[0], col + 1].mark and self.board[
row_index + tuple_indices[0], col + 1].color == candy.color):
# if this is an wrap structure, even it also strip structure, make wrap
if isinstance(candy, Striped):
self.striped_counter -= 1
score -= 60
self.wrapped_counter += 1
score += 120
self.board[row_index + tuple_indices[0], col] = Wrapped(
self.board[row_index + tuple_indices[0], col].color, (row_index + tuple_indices[0], col))
else:
candy.mark = True
if length == Board.STRIPED: # stripe
self.striped_counter += 1
score += 60
if col == last_move.start[1] or col == last_move.end[1]:
if tuple_indices[1] >= last_move.start[0] >= tuple_indices[0]:
if self.board[last_move.start[0]][col].mark: # i.e. not an unmarked wrap
self.board[last_move.start[0]][col] = HorizontalStriped(
self.board[last_move.start[0], col].color, (last_move.start[0], col)) # ""
elif tuple_indices[1] >= last_move.end[0] >= tuple_indices[
0]: # the last move caused this strike
if self.board[last_move.end[0]][col].mark: # i.e. not an unmarked wrap
self.board[last_move.end[0]][col] = Striped(self.board[last_move.end[0], col].color,
(last_move.end[0], col),
DIRECTIONS[
HORZ]) # with the direction of the
# last move direction, remove the mark!
else:
self.board[tuple_indices[1]][col] = Striped(self.board[tuple_indices[1]][col].color,
(tuple_indices[1], col), DIRECTIONS[VERT])
elif length == Board.CHOCOLATE: # color bomb
self.chocolate_counter += 1
if col == last_move.start[1] or col == last_move.end[1]:
if tuple_indices[1] >= last_move.start[0] >= tuple_indices[0]:
self.board[last_move.start[0]][col] = Chocolate((last_move.start[0], col))
elif tuple_indices[1] >= last_move.end[0] >= tuple_indices[
0]: # the last move caused this strike
self.board[last_move.start[0]][col] = Chocolate((last_move.start[0],
col)) # with the direction of the last
# move direction, remove the mark!
return score
def print_board(self):
print()
for row in range(self.height):
for col in range(self.width):
if self.board[row][col].empty:
print('{:16}'.format("Empty"), end="")
else:
print(self.board[row][col], end="")
print()
print("The score of the board is: ", self.score)
def in_board(self, location):
"""
:param location: a 2d coordinate
:return: True if it is a valid coordinate on board
"""
return 0 <= location[0] < self.height and 0 <= location[1] < self.width
def get_adjacent(self, location):
"""
:param location: a coordinate in board
:return: all in-board locations adjacent to the given location
"""
adjacent_loc = [(location[0] - 1, location[1]), (location[0] + 1, location[1]), (location[0], location[1] - 1),
(location[0], location[1] + 1)]
# filter with a comprehension: removing items from a list while iterating over it skips elements
return [adjacent for adjacent in adjacent_loc if self.in_board(adjacent)]
# this function is part of board class
def possible_moves(self):
possible_moves = []
########################
# check horizontal moves
########################
for row in range(self.height):
for col in range(self.width - 1):
self.make_move((row, col), (row, col + 1)) # make move only for checking for matching
if self.check_row_matches(row) or self.check_col_matches(col) or self.check_col_matches(
col + 1) or (
isinstance(self.board[row, col], Special) and isinstance(self.board[row, col + 1],
Special)) or \
(isinstance(self.board[row, col], Chocolate) and not self.is_empty((row, col + 1))) or \
(isinstance(self.board[row, col + 1], Chocolate) and not self.is_empty((row, col))):
possible_moves.append(Move((row, col), (row, col + 1), HORZ))
self.make_move((row, col), (row, col + 1)) # return to the original board by commit the move again
########################
# check vertical moves
########################
for col in range(self.width):
for row in range(self.height - 1):
self.make_move((row, col), (row + 1, col)) # make move only for checking for matching
if self.check_col_matches(col) or self.check_row_matches(row) or self.check_row_matches(
row + 1) or (
isinstance(self.board[row, col], Special) and isinstance(self.board[row + 1, col],
Special)) or \
(isinstance(self.board[row, col], Chocolate) and not self.is_empty((row + 1, col))) or \
(isinstance(self.board[row + 1, col], Chocolate) and not self.is_empty((row, col))):
possible_moves.append(Move((row, col), (row + 1, col), VERT))
self.make_move((row, col), (row + 1, col)) # return to the original board by commit the move again
return possible_moves
def print_possible_moves(self):
possible_moves = self.possible_moves()
print("possible moves:")
for move_num, move in enumerate(possible_moves):
print("move number " + str(move_num) + ": ", move)
def make_move(self, start_tup, end_tup):
tmp = self.board[start_tup]
self.board[start_tup] = self.board[end_tup]
self.board[start_tup].location = start_tup # we also need to update the location
self.board[end_tup] = tmp
self.board[end_tup].location = end_tup
def cascade(self):
for col in range(self.width):
for row in range(self.height - 1, -1, -1):
col_is_empty = False
if self.board[(row, col)].empty:
tmp_row = row # we don't want to override the original row
while self.board[(tmp_row, col)].empty:
if tmp_row == 0: # this is the higher candy in the col
col_is_empty = True
break
tmp_row -= 1
if col_is_empty:
break
else:
self.make_move((row, col), (tmp_row, col))
def print_matches(self):
print()
print("rows:")
for row in range(self.height):
if self.check_row_matches(row):
print(str(row), " ", self.check_row_matches(row))
print("cols:")
for col in range(self.width):
if self.check_col_matches(col):
print(str(col), " ", self.check_col_matches(col))
def explosions(self):
score = 0
for row in range(self.height):
for col in range(self.width):
if self.board[row, col].mark and not self.board[row, col].empty:
score += self.board[row, col].explode(self.board, multiplier=self.multiplier)
return score
def turn_chunk(self, move=NONE_MOVE, with_unknowns=True):
score = self.mark_candies_to_explode(move)
# self.print_board()
score += self.explosions()
self.cascade()
self.reset_next_round()
if not with_unknowns:
self.update_unknowns()
return score
def turn_function(self, move=NONE_MOVE, with_unknowns=True):
self.multiplier = 1
score = self.multiplier * self.turn_chunk(move, with_unknowns=with_unknowns)
self.multiplier += 1
chain_score = self.multiplier * self.turn_chunk(with_unknowns=with_unknowns)
while chain_score > 0:
self.multiplier += 1
score += chain_score
chain_score = self.multiplier * self.turn_chunk(with_unknowns=with_unknowns)
self.score += score
self.unknown_prec = float(self.exploded_counter) / (self.height * self.width)
return score
def reset_next_round(self, rand_flag=False):
if rand_flag:
self.update_unknowns()
return
for row in range(self.height):
for col in range(self.width):
if self.board[row, col].empty:
self.exploded_counter += 1
self.board[row, col] = UnknownCandy((row, col))
def play_a_game(self, detailed_game=False): # flag if you want detailed game
if detailed_game:
self.print_board()
self.turn_function()
while True:
self.print_board()
possible_moves = self.possible_moves()
if not possible_moves:
print("no more possible moves")
exit(0)
self.print_possible_moves()
x = raw_input("insert number of move")
self.make_move(possible_moves[int(x)].start, possible_moves[int(x)].end)
if detailed_game:
self.print_board()
raw_input()
self.turn_function(Move(possible_moves[int(x)].start, possible_moves[int(x)].end, True))
def update_unknowns(self):
for row in range(self.height):
for col in range(self.width):
if self.board[row, col].empty:
self.board[row, col] = Candy(randint(0, self.num_of_candies - 1), (row, col))
def play_game_with_random(self, player, num_of_runs=1, detailed=False):
turns_counter = 0
self.turn_function(with_unknowns=False)
self.reset_param()
player.get_board(self)
for i in range(num_of_runs):
if detailed:
self.print_board()
# self.print_possible_moves()
player.choose_move()
best_move = player.get_best_move()
if detailed:
print(best_move)
self.make_move(best_move.start, best_move.end)
self.turn_function(move=best_move, with_unknowns=False)
if detailed:
self.print_board()
turns_counter += 1
print(self.score, self.striped_counter, self.wrapped_counter, self.chocolate_counter)
return self.score / turns_counter, self.striped_counter, self.wrapped_counter, self.chocolate_counter
def reset_param(self):
self.score = 0
self.striped_counter = 0
self.wrapped_counter = 0
self.chocolate_counter = 0
def evaluate_turn(self, score_coeff, stripe_coeff, wrapped_coeff, chocolate_coeff):
return score_coeff * self.score / 100 + stripe_coeff * self.striped_counter + wrapped_coeff * \
self.wrapped_counter \
+ chocolate_coeff * self.chocolate_counter
def main():
board_to_copy = np.array([[4, 2, 23, 2, 2], [2, 23, 2, 8, 6], [4, 8, 23, 4, 2], [12, 2, 4, 0, 6]])
board = Board(board_to_copy=board_to_copy)
board.play_a_game(True)
# board = Board(height=6, width=5)
# board_to_copy = np.array([[0, 0, 8, 2, 8], [4, 20, 19, 6, 2], [0, 2, 10, 2, 2], [10, 4, 0, 2, 10], [0, 8, 0, 8,
# 2], [8, 0, 8, 0, 8]])
# board.interpret_board(board_to_copy)
# board.print_board()
# board.turn_function()
# while True:
# board.print_board()
# possible_moves = board.possible_moves()
# if not possible_moves:
# print("no more possible moves")
# pass
# exit(0)
# board.print_possible_moves()
# x = raw_input("insert number of move")
# board.make_move(possible_moves[int(x)].start, possible_moves[int(x)].end)
# board.print_board()
# raw_input()
# board.turn_function(Move(possible_moves[int(x)].start, possible_moves[int(x)].end, True))
# def main():
# board = Board(height=3, width=5)
# board_to_copy = np.array([[2,0,4,0,4],[0,0,2,0,4],[2,4,0,4,4]])
# board.interpret_board(board_to_copy)
# board.print_board()
# board.mark_candies_to_explode()
# board.print_board()
# board.print_matches()
# board.explosions()
# board.print_board()
# board.print_matches()
# board.cascade()
# board.mark_candies_to_explode()
# board.explosions()
# board.cascade()
# while True:
# board.print_board()
# possible_moves = board.possible_moves()
# board.print_possible_moves()
# x = raw_input("insert number of move")
# board.make_move(possible_moves[int(x)].start, possible_moves[int(x)].end)
# board.print_board()
# raw_input()
# board.mark_candies_to_explode(Move(possible_moves[int(x)].start, possible_moves[int(x)].end, True))
# board.explosions()
# board.cascade()
if __name__ == '__main__':
    main()
| [
"Move.Move",
"numpy.array",
"numpy.zeros",
"numpy.ndarray",
"random.randint"
] | [((373, 403), 'Move.Move', 'Move', (['(-1, -1)', '(-1, -2)', '(True)'], {}), '((-1, -1), (-1, -2), True)\n', (377, 403), False, 'from Move import Move\n'), ((21479, 21566), 'numpy.array', 'np.array', (['[[4, 2, 23, 2, 2], [2, 23, 2, 8, 6], [4, 8, 23, 4, 2], [12, 2, 4, 0, 6]]'], {}), '([[4, 2, 23, 2, 2], [2, 23, 2, 8, 6], [4, 8, 23, 4, 2], [12, 2, 4, \n 0, 6]])\n', (21487, 21566), True, 'import numpy as np\n'), ((2422, 2467), 'numpy.ndarray', 'np.ndarray', (['numbers_board.shape'], {'dtype': 'object'}), '(numbers_board.shape, dtype=object)\n', (2432, 2467), True, 'import numpy as np\n'), ((2954, 3009), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.height, self.width)', 'dtype': 'object'}), '(shape=(self.height, self.width), dtype=object)\n', (2962, 3009), True, 'import numpy as np\n'), ((3138, 3173), 'random.randint', 'randint', (['(0)', '(self.num_of_candies - 1)'], {}), '(0, self.num_of_candies - 1)\n', (3145, 3173), False, 'from random import randint\n'), ((14427, 14465), 'Move.Move', 'Move', (['(row, col)', '(row, col + 1)', 'HORZ'], {}), '((row, col), (row, col + 1), HORZ)\n', (14431, 14465), False, 'from Move import Move\n'), ((15492, 15530), 'Move.Move', 'Move', (['(row, col)', '(row + 1, col)', 'VERT'], {}), '((row, col), (row + 1, col), VERT)\n', (15496, 15530), False, 'from Move import Move\n'), ((20031, 20044), 'random.randint', 'randint', (['(0)', '(5)'], {}), '(0, 5)\n', (20038, 20044), False, 'from random import randint\n')] |
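A scripted (non-interactive) game against the Board above: take the first legal move each turn and let turn_function resolve the cascades (assumes the Candy and Move modules from this snippet are importable; the starting board is the one from main()):
import numpy as np

board = Board(board_to_copy=np.array([[4, 2, 23, 2, 2], [2, 23, 2, 8, 6],
                                      [4, 8, 23, 4, 2], [12, 2, 4, 0, 6]]))
board.turn_function()  # resolve any matches already present in the initial board
for _ in range(3):
    moves = board.possible_moves()
    if not moves:
        break  # no legal move left
    move = moves[0]
    board.make_move(move.start, move.end)
    board.turn_function(move)  # score the move plus all chained explosions
print("final score:", board.score)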
import hashlib
import jpype
import os
import struct
from array import array
from os.path import dirname, abspath
def start_jvm():
jvm_path = jpype.getDefaultJVMPath()
jars_path = os.path.join(dirname(abspath(__file__)), "jars")
load_modules = ["commons-codec-1.6", "jpbc-plaf-1.2.0", "jpbc-api-1.2.0", "nics-crypto"]
load_modules = [os.path.join(jars_path, x) + ".jar" for x in load_modules]
jpype.startJVM(jvm_path, "-Djava.class.path=" + ":".join(load_modules))
AFGHGlobalParameters = AFGHProxyReEncryption = CurveElement = GTFiniteElement = None
Tuple = String = params = None
def init():
global AFGHGlobalParameters, AFGHProxyReEncryption, CurveElement, GTFiniteElement
global Tuple, String, params
start_jvm()
AFGHGlobalParameters = jpype.JClass("nics.crypto.proxy.afgh.AFGHGlobalParameters")
AFGHProxyReEncryption = jpype.JClass("nics.crypto.proxy.afgh.AFGHProxyReEncryption")
CurveElement = jpype.JClass("it.unisa.dia.gas.plaf.jpbc.field.curve.CurveElement")
GTFiniteElement = jpype.JClass("it.unisa.dia.gas.plaf.jpbc.field.gt.GTFiniteElement")
Tuple = jpype.JClass("nics.crypto.Tuple")
String = jpype.java.lang.String
params = AFGHGlobalParameters(256, 1536)
init()
def load_priv(s):
Zq = params.getZq()
return Zq.newElement(jpype.java.math.BigInteger(array("b", s)))
def dump_priv(priv):
return array("b", jpype.java.math.BigInteger.toByteArray(priv.toBigInteger())).tostring()
def load_pub(s):
"""Works for both public and re-encryption keys"""
el = CurveElement(params.getG1())
el.setFromBytesCompressed(array("b", s))
return el
def dump_pub(pub):
return array("b", pub.toBytesCompressed()).tostring()
def dump_gtf(el):
return array("b", el.toBytes()).tostring()
def load_gtf(m):
x = GTFiniteElement(params.getZ()).duplicate()
b = array("b", m)
x.setFromBytes(b)
return x
def dump_re_message(m):
return dump_gtf(m.get(1)) + dump_gtf(m.get(2))
def load_re_message(m):
x1 = load_gtf(m[:len(m) // 2])
x2 = load_gtf(m[len(m) // 2:])
return Tuple([x1, x2])
def dump_e_message(m):
x1 = dump_pub(m.get(1))
x2 = dump_gtf(m.get(2))
fmt = struct.pack("HH", len(x1), len(x2))
return fmt + x1 + x2
def load_e_message(m):
l1, l2 = struct.unpack("HH", m[:4])
x1 = load_pub(m[4:(4 + l1)])
x2 = load_gtf(m[(4 + l1):])
return Tuple([x1, x2])
class Key(object):
def __init__(self, priv=None, pub=None):
"""
:param priv: private key element, if already generated or loaded
:param pub: matching public key element, if already generated or loaded
"""
self.priv = priv
self.pub = pub
if self.priv:
priv_inv = priv.duplicate()
self.priv_invert = priv_inv.invert()
if self.pub:
self.pub_pow = self.pub.pow()
@classmethod
def load_priv(cls, s, generate_pub=True):
priv = load_priv(s)
if generate_pub:
pub = AFGHProxyReEncryption.generatePublicKey(priv, params)
else:
pub = None
return cls(priv=priv, pub=pub)
@classmethod
def from_passphrase(cls, passphrase, generate_pub=True):
return cls.load_priv(hashlib.sha256(passphrase.encode('utf8')).digest(), generate_pub=generate_pub)
def dump_priv(self):
return dump_priv(self.priv)
def dump_pub(self):
return dump_pub(self.pub)
@classmethod
def make_priv(cls, generate_pub=True):
priv = AFGHProxyReEncryption.generateSecretKey(params)
if generate_pub:
pub = AFGHProxyReEncryption.generatePublicKey(priv, params)
else:
pub = None
return cls(priv=priv, pub=pub)
@classmethod
def load_pub(cls, s):
return cls(pub=load_pub(s))
def encrypt(self, message):
e = AFGHProxyReEncryption.bytesToElement(array("b", message), params.getG2())
c_a = AFGHProxyReEncryption.secondLevelEncryption(e, self.pub_pow, params)
return dump_e_message(c_a)
def decrypt_my(self, s):
c_a = load_e_message(s)
m = AFGHProxyReEncryption.secondLevelDecryption(c_a, self.priv, params)
return array("b", m.toBytes()).tostring().strip("\x00")
def decrypt_re(self, s):
c_b = load_re_message(s)
m = AFGHProxyReEncryption.firstLevelDecryptionPreProcessing(c_b, self.priv_invert, params)
r1 = array("b", m.toBytes())
r2 = r1.tostring()
return r2.strip(b"\x00")
def re_key(self, pub):
if isinstance(pub, Key):
pub = pub.pub
# elif isinstance(pub, basestring):
else:
pub = load_pub(pub)
return ReKey.make(self.priv, pub)
class ReKey(object):
def __init__(self, key):
self.key = key
self.key_ppp = params.getE().pairing(key)
@classmethod
def make(cls, priv, pub):
if isinstance(priv, Key):
priv = priv.priv
if isinstance(pub, Key):
pub = pub.pub
key = AFGHProxyReEncryption.generateReEncryptionKey(pub.duplicate(), priv.duplicate())
return cls(key)
def dump(self):
return dump_pub(self.key)
@classmethod
def load(cls, s):
return cls(load_pub(s))
def reencrypt(self, m):
c_b = AFGHProxyReEncryption.reEncryption(load_e_message(m), self.key, self.key_ppp)
return dump_re_message(c_b)
| [
"array.array",
"jpype.getDefaultJVMPath",
"os.path.join",
"struct.unpack",
"os.path.abspath",
"jpype.JClass"
] | [((148, 173), 'jpype.getDefaultJVMPath', 'jpype.getDefaultJVMPath', ([], {}), '()\n', (171, 173), False, 'import jpype\n'), ((782, 841), 'jpype.JClass', 'jpype.JClass', (['"""nics.crypto.proxy.afgh.AFGHGlobalParameters"""'], {}), "('nics.crypto.proxy.afgh.AFGHGlobalParameters')\n", (794, 841), False, 'import jpype\n'), ((870, 930), 'jpype.JClass', 'jpype.JClass', (['"""nics.crypto.proxy.afgh.AFGHProxyReEncryption"""'], {}), "('nics.crypto.proxy.afgh.AFGHProxyReEncryption')\n", (882, 930), False, 'import jpype\n'), ((950, 1017), 'jpype.JClass', 'jpype.JClass', (['"""it.unisa.dia.gas.plaf.jpbc.field.curve.CurveElement"""'], {}), "('it.unisa.dia.gas.plaf.jpbc.field.curve.CurveElement')\n", (962, 1017), False, 'import jpype\n'), ((1040, 1107), 'jpype.JClass', 'jpype.JClass', (['"""it.unisa.dia.gas.plaf.jpbc.field.gt.GTFiniteElement"""'], {}), "('it.unisa.dia.gas.plaf.jpbc.field.gt.GTFiniteElement')\n", (1052, 1107), False, 'import jpype\n'), ((1120, 1153), 'jpype.JClass', 'jpype.JClass', (['"""nics.crypto.Tuple"""'], {}), "('nics.crypto.Tuple')\n", (1132, 1153), False, 'import jpype\n'), ((1868, 1881), 'array.array', 'array', (['"""b"""', 'm'], {}), "('b', m)\n", (1873, 1881), False, 'from array import array\n'), ((2307, 2333), 'struct.unpack', 'struct.unpack', (['"""HH"""', 'm[:4]'], {}), "('HH', m[:4])\n", (2320, 2333), False, 'import struct\n'), ((1615, 1628), 'array.array', 'array', (['"""b"""', 's'], {}), "('b', s)\n", (1620, 1628), False, 'from array import array\n'), ((211, 228), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (218, 228), False, 'from os.path import dirname, abspath\n'), ((352, 378), 'os.path.join', 'os.path.join', (['jars_path', 'x'], {}), '(jars_path, x)\n', (364, 378), False, 'import os\n'), ((1340, 1353), 'array.array', 'array', (['"""b"""', 's'], {}), "('b', s)\n", (1345, 1353), False, 'from array import array\n'), ((3875, 3894), 'array.array', 'array', (['"""b"""', 'message'], {}), "('b', message)\n", (3880, 3894), False, 'from array import array\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Fastext Common Crawl vectors, e.g. use with filename
"models:fasttext_fi_vectors/crawl-300d-fi.vec".
"""
import os
import torchtext.vocab as vocab
URL = 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.fi.300.vec.gz'
def download(datapath):
return vocab.Vectors(
name='cc.fi.300.vec.gz',
url=URL,
cache=os.path.join(datapath, 'models', 'fasttext_fi_vectors'),
)
| [
"os.path.join"
] | [((547, 602), 'os.path.join', 'os.path.join', (['datapath', '"""models"""', '"""fasttext_fi_vectors"""'], {}), "(datapath, 'models', 'fasttext_fi_vectors')\n", (559, 602), False, 'import os\n')] |
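Usage sketch: download() returns a torchtext Vectors object, so individual word vectors can be looked up directly (the datapath below is illustrative; the first call fetches the Finnish fastText vector file):
vectors = download('/tmp/parlai_data')
print(vectors.dim)           # 300
print(vectors['kissa'][:5])  # first components of the vector for "kissa" (cat)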
from django.urls import path
from . import views
app_name = 'twitteruser'
urlpatterns = [
path('', views.home_view, name='home_view'),
path('profile/<int:user_id>/', views.profile_view, name='profile_view'),
path('follow/<int:user_id>/', views.follow_user, name='follow_user'),
path('unfollow/<int:user_id>/', views.unfollow_user, name='unfollow_user'),
] | [
"django.urls.path"
] | [((96, 139), 'django.urls.path', 'path', (['""""""', 'views.home_view'], {'name': '"""home_view"""'}), "('', views.home_view, name='home_view')\n", (100, 139), False, 'from django.urls import path\n'), ((145, 216), 'django.urls.path', 'path', (['"""profile/<int:user_id>/"""', 'views.profile_view'], {'name': '"""profile_view"""'}), "('profile/<int:user_id>/', views.profile_view, name='profile_view')\n", (149, 216), False, 'from django.urls import path\n'), ((222, 290), 'django.urls.path', 'path', (['"""follow/<int:user_id>/"""', 'views.follow_user'], {'name': '"""follow_user"""'}), "('follow/<int:user_id>/', views.follow_user, name='follow_user')\n", (226, 290), False, 'from django.urls import path\n'), ((296, 370), 'django.urls.path', 'path', (['"""unfollow/<int:user_id>/"""', 'views.unfollow_user'], {'name': '"""unfollow_user"""'}), "('unfollow/<int:user_id>/', views.unfollow_user, name='unfollow_user')\n", (300, 370), False, 'from django.urls import path\n')] |
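With app_name set, these routes reverse under the "twitteruser" namespace (a sketch; assumes the URLconf above is included at the project root of a configured Django project):
from django.urls import reverse

reverse('twitteruser:home_view')                # '/'
reverse('twitteruser:profile_view', args=[42])  # '/profile/42/'
reverse('twitteruser:follow_user', args=[42])   # '/follow/42/'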
"""newskylabs/tools/bookblock/__main__.py:
Main entry point for the `bookblock` tool.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2019 <NAME>"
__license__ = "Apache License 2.0, http://www.apache.org/licenses/LICENSE-2.0"
__date__ = "2019/10/11"
__version__ = '0.0.1.dev1'
from newskylabs.tools.bookblock.scripts.bookblock import bookblock
## =========================================================
## Main
## ---------------------------------------------------------
if __name__ == '__main__':
bookblock()
## =========================================================
## =========================================================
## fin.
| [
"newskylabs.tools.bookblock.scripts.bookblock.bookblock"
] | [((568, 579), 'newskylabs.tools.bookblock.scripts.bookblock.bookblock', 'bookblock', ([], {}), '()\n', (577, 579), False, 'from newskylabs.tools.bookblock.scripts.bookblock import bookblock\n')] |
import requests
from pprint import pprint
client_id = "58efa175549cbbe34857"
client_secret = "a70cc1472f395ffe456b64b6ab018644"
concrete_params = {
"client_id": client_id,
"client_secret": client_secret
}
base_url = "https://api.artsy.net/api/tokens/xapp_token"
r = requests.post(base_url, params=concrete_params)
pprint(r.text)
headers = {
"X-XAPP-Token": r.json()['token']
}
url = "https://api.artsy.net/api/sales"
r = requests.get(url, headers=headers)
pprint(r.json())
if r.ok:
import json
path = "sales.json"
with open(path, "w") as f:
json.dump(r.json(), f) | [
"requests.post",
"pprint.pprint",
"requests.get"
] | [((276, 323), 'requests.post', 'requests.post', (['base_url'], {'params': 'concrete_params'}), '(base_url, params=concrete_params)\n', (289, 323), False, 'import requests\n'), ((324, 338), 'pprint.pprint', 'pprint', (['r.text'], {}), '(r.text)\n', (330, 338), False, 'from pprint import pprint\n'), ((437, 471), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (449, 471), False, 'import requests\n')] |
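The same xapp token header works for any other Artsy endpoint, e.g. listing artists (a sketch; the endpoint is from the public API, pagination left at defaults):
artists = requests.get("https://api.artsy.net/api/artists", headers=headers)
if artists.ok:
    pprint(artists.json())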
from misc_math import convert_to_base, base_to_dec
from dictionary import dict_sort
class Converter:
"""A class to convert strings to compact integers using a custom codec/lookup."""
def __init__(self, lookup=None):
if lookup is None:
self.lookup = {}
else:
self.lookup = lookup
self.codec = ''.join(dict_sort(lookup))
@classmethod
def from_codec(cls, codec: str):
"""Creates a Converter object from a codec."""
lookup = {}
for x in range(len(codec)):
lookup[codec[x]] = x + 1
return cls(lookup)
def fit(self, strings):
"""Takes in a string or list of strings, and changes the lookup/codec to fit the string(s)."""
if type(strings) == list:
string = '\n'.join(strings)
else:
string = strings
# Find and count characters
for x in string:
if x not in self.lookup:
self.lookup[x] = string.count(x)
else:
self.lookup[x] = self.lookup.get(x) + string.count(x)
if type(strings) == list:
self.lookup['\n'] = self.lookup.get('\n') - len(strings) + 1 # Correct \n count
if self.lookup.get('\n') == 0:
del self.lookup['\n']
self.codec = ''.join(dict_sort(self.lookup))
def stoi(self, string: str) -> int:
"""Converts strings to integers based on the lookup/codec."""
return base_to_dec(string, len(self.codec), self.codec)
def itos(self, number: int) -> str:
"""Converts integers to strings based on the lookup/codec."""
return convert_to_base(number, len(self.codec), self.codec)
def __str__(self):
return "Converter({})/Converter.from_codec({})".format(self.lookup, self.codec) | [
"dictionary.dict_sort"
] | [((358, 375), 'dictionary.dict_sort', 'dict_sort', (['lookup'], {}), '(lookup)\n', (367, 375), False, 'from dictionary import dict_sort\n'), ((1336, 1358), 'dictionary.dict_sort', 'dict_sort', (['self.lookup'], {}), '(self.lookup)\n', (1345, 1358), False, 'from dictionary import dict_sort\n')] |
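Round-trip sketch for the Converter above (assumes the misc_math and dictionary helpers from this snippet are importable; the string-to-int mapping only round-trips when the first character is not the codec's zero symbol):
conv = Converter()
conv.fit(["hello world", "hello converter"])  # build the frequency lookup/codec
packed = conv.stoi("hello")
print(packed)             # compact integer encoding of "hello"
print(conv.itos(packed))  # decodes back using the same codec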