Dataset columns (type and observed value range):

  repo              stringlengths   7 – 55
  path              stringlengths   4 – 223
  url               stringlengths   87 – 315
  code              stringlengths   75 – 104k
  code_tokens       list
  docstring         stringlengths   1 – 46.9k
  docstring_tokens  list
  language          stringclasses   1 value
  partition         stringclasses   3 values
  avg_line_len      float64         7.91 – 980
RedHatInsights/insights-core
insights/client/__init__.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/__init__.py#L356-L375
def upload(self, payload=None, content_type=None):
    """
    Upload the archive at `payload` with content type `content_type`

    returns (int): upload status code
    """
    # platform - prefer the value passed in to func over config
    payload = payload or self.config.payload
    content_type = content_type or self.config.content_type
    if payload is None:
        raise ValueError('Specify a file to upload.')
    if not os.path.exists(payload):
        raise IOError('Cannot upload %s: File does not exist.' % payload)

    upload_results = client.upload(
        self.config, self.connection, payload, content_type)

    # return api response
    return upload_results
[ "def", "upload", "(", "self", ",", "payload", "=", "None", ",", "content_type", "=", "None", ")", ":", "# platform - prefer the value passed in to func over config", "payload", "=", "payload", "or", "self", ".", "config", ".", "payload", "content_type", "=", "content_type", "or", "self", ".", "config", ".", "content_type", "if", "payload", "is", "None", ":", "raise", "ValueError", "(", "'Specify a file to upload.'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "payload", ")", ":", "raise", "IOError", "(", "'Cannot upload %s: File does not exist.'", "%", "payload", ")", "upload_results", "=", "client", ".", "upload", "(", "self", ".", "config", ",", "self", ".", "connection", ",", "payload", ",", "content_type", ")", "# return api response", "return", "upload_results" ]
Upload the archive at `payload` with content type `content_type` returns (int): upload status code
[ "Upload", "the", "archive", "at", "path", "with", "content", "type", "content_type", "returns", "(", "int", ")", ":", "upload", "status", "code" ]
python
train
36.45
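The `payload = payload or self.config.payload` fallback in `upload` is a common idiom worth a caveat: `or` treats every falsy argument as missing. A minimal standalone sketch of the difference against an explicit `None` check (the `Config` object here is invented for illustration):

# Hypothetical stand-in for self.config; only the fallback pattern is real.
class Config:
    payload = '/var/tmp/archive.tar.gz'

config = Config()

def pick_or(arg):
    # `or` fallback: falsy arguments such as '' are silently replaced.
    return arg or config.payload

def pick_if_none(arg):
    # Explicit check: only a truly missing argument falls back.
    return config.payload if arg is None else arg

print(pick_or(''))       # /var/tmp/archive.tar.gz -- empty string lost
print(pick_if_none(''))  # '' -- empty string preserved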
robotools/fontParts
Lib/fontParts/base/normalizers.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/normalizers.py#L708-L718
def normalizeInternalObjectType(value, cls, name):
    """
    Normalizes an internal object type.

    * **value** must be an instance of **cls**.
    * Returned value is the same type as the input value.
    """
    if not isinstance(value, cls):
        raise TypeError("%s must be a %s instance, not %s."
                        % (name, cls.__name__, type(value).__name__))
    return value
[ "def", "normalizeInternalObjectType", "(", "value", ",", "cls", ",", "name", ")", ":", "if", "not", "isinstance", "(", "value", ",", "cls", ")", ":", "raise", "TypeError", "(", "\"%s must be a %s instance, not %s.\"", "%", "(", "name", ",", "name", ",", "type", "(", "value", ")", ".", "__name__", ")", ")", "return", "value" ]
Normalizes an internal object type. * **value** must be an instance of **cls**. * Returned value is the same type as the input value.
[ "Normalizes", "an", "internal", "object", "type", "." ]
python
train
34.181818
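A hypothetical usage sketch of the normalizer above; the `BasePoint`/`Point` names are invented, and the import assumes fontParts is installed:

from fontParts.base.normalizers import normalizeInternalObjectType

class BasePoint(object):
    pass

class Point(BasePoint):
    pass

# Accepted: a Point is an instance of BasePoint, so it is returned unchanged.
p = normalizeInternalObjectType(Point(), BasePoint, "Point")

# Rejected: a str is not a BasePoint, so a TypeError is raised.
try:
    normalizeInternalObjectType("oops", BasePoint, "Point")
except TypeError as exc:
    print(exc)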
holtjma/msbwt
MUS/MSBWTGen.py
https://github.com/holtjma/msbwt/blob/7503346ec072ddb89520db86fef85569a9ba093a/MUS/MSBWTGen.py#L789-L979
def mergeNewMSBWT(mergedDir, inputBwtDirs, numProcs, logger):
    '''
    This function will take a list of input BWTs (compressed or not) and merge them into a single BWT
    @param mergedDir - the destination directory for the final merged MSBWT
    @param inputBwtDirs - the directories of the BWTs to merge
    @param numProcs - number of processes we're allowed to use
    @param logger - output goes here
    '''
    st = time.time()
    iterst = time.time()
    vcLen = 6

    #TODO: take advantage of these to skip an iteration or two perhaps
    numInputs = len(inputBwtDirs)
    msbwts = [None]*numInputs
    mergedLength = 0
    for i, dirName in enumerate(inputBwtDirs):
        '''
        NOTE: in practice, since we're allowing for multiprocessing, we construct the FM-index for each input BWT
        simply because in the long run, this allows us to figure out how to start processing chunks separately.
        Without this, we would need to track extra information that really just represent the FM-index.
        '''
        msbwts[i] = MultiStringBWT.loadBWT(dirName, logger)
        mergedLength += msbwts[i].totalSize

    #binSize = 2**1#small bin debugging
    #binSize = 2**15#this one is just for test purposes, makes easy to debug things
    #binSize = 2**25#diff in 22-23 is not that much, 23-24 was 8 seconds of difference, so REALLY no diff
    binSize = 2**28

    #allocate the mergedBWT space
    logger.info('Allocating space on disk...')
    mergedBWT = np.lib.format.open_memmap(mergedDir+'/msbwt.npy', 'w+', '<u1', (mergedLength,))

    #this one will create the array using bits
    logger.info('Initializing iterations...')
    placeArray = np.lib.format.open_memmap(mergedDir+'/temp.0.npy', 'w+', '<u1', (mergedBWT.shape[0],))
    copiedPlaceArray = np.lib.format.open_memmap(mergedDir+'/temp.1.npy', 'w+', '<u1', (mergedBWT.shape[0],))
    start = msbwts[0].totalSize
    end = 0

    #fill out the initial array with 0s, 1s, 2s, etc. as our initial condition
    for i, msbwt in enumerate(msbwts):
        end += msbwt.getTotalSize()
        placeArray[start:end].fill(i)
        copiedPlaceArray[start:end].fill(i)
        start = end

    #create something to track the offsets
    #TODO: x/binSize + 1 makes one too many bins if it's exactly divisible by binSize, ex: 4 length BWT with binSize 2
    nextBinHasChanged = np.ones(dtype='b', shape=(mergedBWT.shape[0]/binSize+1,))
    prevOffsetCounts = np.zeros(dtype='<u8', shape=(mergedBWT.shape[0]/binSize+1, numInputs))
    currOffsetCounts = np.zeros(dtype='<u8', shape=(mergedBWT.shape[0]/binSize+1, numInputs))
    nextOffsetCounts = np.zeros(dtype='<u8', shape=(mergedBWT.shape[0]/binSize+1, numInputs))
    binUpdates = [{}]*(mergedBWT.shape[0]/binSize+1)

    bwtInd = 0
    offsets = [0]*numInputs
    for x in xrange(0, currOffsetCounts.shape[0]):
        #set, then change for next iter
        nextOffsetCounts[x] = offsets
        remaining = binSize
        while remaining > 0 and bwtInd < numInputs:
            if remaining > msbwts[bwtInd].totalSize-offsets[bwtInd]:
                remaining -= msbwts[bwtInd].totalSize-offsets[bwtInd]
                offsets[bwtInd] = msbwts[bwtInd].totalSize
                bwtInd += 1
            else:
                offsets[bwtInd] += remaining
                remaining = 0

    ignored = 0

    #original
    sys.stdout.write('\rcp ')
    sys.stdout.flush()
    del copiedPlaceArray

    needsMoreIterations = True
    i = 0
    sameOffsetCount = 0
    while needsMoreIterations:
        prevOffsetCounts = currOffsetCounts
        currOffsetCounts = nextOffsetCounts
        nextOffsetCounts = np.zeros(dtype='<u8', shape=(mergedBWT.shape[0]/binSize+1, numInputs))
        needsMoreIterations = False
        sameOffsetCount = 0

        #this method uses a condensed byte and will ignore regions that are already finished
        sys.stdout.write('\rld ')
        sys.stdout.flush()
        ignored = 0

        iteret = time.time()
        sys.stdout.write('\r')
        logger.info('Finished iter '+str(i)+' in '+str(iteret-iterst)+' seconds')
        iterst = time.time()
        i += 1

        sys.stdout.write('\rld')
        sys.stdout.flush()

        #track which bins are actually different
        binHasChanged = nextBinHasChanged
        nextBinHasChanged = np.zeros(dtype='b', shape=(mergedBWT.shape[0]/binSize+1))

        tups = []

        for x in xrange(0, mergedBWT.shape[0]/binSize + 1):
            #check if the current offset matches the previous iteration offset
            sameOffset = np.array_equal(currOffsetCounts[x], prevOffsetCounts[x])
            if sameOffset:
                sameOffsetCount += 1

            '''
            TODO: the below False is there because this only works if you do a full file copy right now. It's
            because unless we copy, then the appropriate parts of the nextPlaceArray isn't properly updated. It's
            unclear whether one of these is better than the other in terms of performance. File copying is slow, but
            if only a couple sequences are similar then skipping is good. I think in general, we only skip at the
            beginning for real data though, so I'm going with the no-skip, no-copy form until I can resolve the
            problem (if there's a resolution).
            '''
            if False and not binHasChanged[x] and sameOffset:
                for key in binUpdates[x]:
                    nextOffsetCounts[key] += binUpdates[x][key]
                ignored += 1
            else:
                #note these are swapped depending on the iteration, saves time since there is no file copying
                if i % 2 == 0:
                    tup = (x, binSize, vcLen, currOffsetCounts[x], mergedDir+'/temp.0.npy', mergedDir+'/temp.1.npy', inputBwtDirs)
                else:
                    tup = (x, binSize, vcLen, currOffsetCounts[x], mergedDir+'/temp.1.npy', mergedDir+'/temp.0.npy', inputBwtDirs)
                tups.append(tup)

        if numProcs > 1:
            #TODO: tinker with chunksize, it might matter
            myPool = multiprocessing.Pool(numProcs)
            #myPool = multiprocessing.pool.ThreadPool(numProcs)
            rets = myPool.imap(mergeNewMSBWTPoolCall, tups, chunksize=10)
        else:
            rets = []
            for tup in tups:
                rets.append(mergeNewMSBWTPoolCall(tup))

        progressCounter = ignored
        sys.stdout.write('\r'+str(100*progressCounter*binSize/mergedBWT.shape[0])+'%')
        sys.stdout.flush()

        for ret in rets:
            #iterate through the returns so we can figure out information necessary for continuation
            (x, nBHC, nOC, nMI) = ret
            binUpdates[x] = nOC
            for k in nBHC:
                nextBinHasChanged[k] |= nBHC[k]
            for b in nOC:
                nextOffsetCounts[b] += nOC[b]
            needsMoreIterations |= nMI

            progressCounter += 1
            sys.stdout.write('\r'+str(min(100*progressCounter*binSize/mergedBWT.shape[0], 100))+'%')
            sys.stdout.flush()

        nextOffsetCounts = np.cumsum(nextOffsetCounts, axis=0)-nextOffsetCounts
        if numProcs > 1:
            myPool.terminate()
            myPool.join()
            myPool = None

    sys.stdout.write('\r')
    sys.stdout.flush()

    logger.info('Order solved, saving final array...')

    #TODO: make this better
    offsets = np.zeros(dtype='<u8', shape=(numInputs,))
    for i in xrange(0, mergedBWT.shape[0]/binSize+1):
        ind = placeArray[i*binSize:(i+1)*binSize]
        if i == mergedBWT.shape[0]/binSize:
            ind = ind[0:mergedBWT.shape[0]-i*binSize]
        bc = np.bincount(ind, minlength=numInputs)
        for x in xrange(0, numInputs):
            mergedBWT[np.add(i*binSize, np.where(ind == x))] = msbwts[x].getBWTRange(int(offsets[x]), int(offsets[x]+bc[x]))
        offsets += bc

    et = time.time()
    logger.info('Finished all merge iterations in '+str(et-st)+' seconds.')
[ "def", "mergeNewMSBWT", "(", "mergedDir", ",", "inputBwtDirs", ",", "numProcs", ",", "logger", ")", ":", "st", "=", "time", ".", "time", "(", ")", "iterst", "=", "time", ".", "time", "(", ")", "vcLen", "=", "6", "#TODO: take advantage of these to skip an iteration or two perhaps", "numInputs", "=", "len", "(", "inputBwtDirs", ")", "msbwts", "=", "[", "None", "]", "*", "numInputs", "mergedLength", "=", "0", "for", "i", ",", "dirName", "in", "enumerate", "(", "inputBwtDirs", ")", ":", "'''\n NOTE: in practice, since we're allowing for multiprocessing, we construct the FM-index for each input BWT\n simply because in the long run, this allows us to figure out how to start processing chunks separately.\n Without this, we would need to track extra information that really just represent the FM-index.\n '''", "msbwts", "[", "i", "]", "=", "MultiStringBWT", ".", "loadBWT", "(", "dirName", ",", "logger", ")", "mergedLength", "+=", "msbwts", "[", "i", "]", ".", "totalSize", "#binSize = 2**1#small bin debugging", "#binSize = 2**15#this one is just for test purposes, makes easy to debug things", "#binSize = 2**25#diff in 22-23 is not that much, 23-24 was 8 seconds of difference, so REALLY no diff", "binSize", "=", "2", "**", "28", "#allocate the mergedBWT space", "logger", ".", "info", "(", "'Allocating space on disk...'", ")", "mergedBWT", "=", "np", ".", "lib", ".", "format", ".", "open_memmap", "(", "mergedDir", "+", "'/msbwt.npy'", ",", "'w+'", ",", "'<u1'", ",", "(", "mergedLength", ",", ")", ")", "#this one will create the array using bits", "logger", ".", "info", "(", "'Initializing iterations...'", ")", "placeArray", "=", "np", ".", "lib", ".", "format", ".", "open_memmap", "(", "mergedDir", "+", "'/temp.0.npy'", ",", "'w+'", ",", "'<u1'", ",", "(", "mergedBWT", ".", "shape", "[", "0", "]", ",", ")", ")", "copiedPlaceArray", "=", "np", ".", "lib", ".", "format", ".", "open_memmap", "(", "mergedDir", "+", "'/temp.1.npy'", ",", "'w+'", ",", "'<u1'", ",", "(", "mergedBWT", ".", "shape", "[", "0", "]", ",", ")", ")", "start", "=", "msbwts", "[", "0", "]", ".", "totalSize", "end", "=", "0", "#fill out the initial array with 0s, 1s, 2s, etc. 
as our initial condition", "for", "i", ",", "msbwt", "in", "enumerate", "(", "msbwts", ")", ":", "end", "+=", "msbwt", ".", "getTotalSize", "(", ")", "placeArray", "[", "start", ":", "end", "]", ".", "fill", "(", "i", ")", "copiedPlaceArray", "[", "start", ":", "end", "]", ".", "fill", "(", "i", ")", "start", "=", "end", "#create something to track the offsets", "#TODO: x/binSize + 1 makes one too many bins if it's exactly divisible by binSize, ex: 4 length BWT with binSize 2", "nextBinHasChanged", "=", "np", ".", "ones", "(", "dtype", "=", "'b'", ",", "shape", "=", "(", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ",", ")", ")", "prevOffsetCounts", "=", "np", ".", "zeros", "(", "dtype", "=", "'<u8'", ",", "shape", "=", "(", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ",", "numInputs", ")", ")", "currOffsetCounts", "=", "np", ".", "zeros", "(", "dtype", "=", "'<u8'", ",", "shape", "=", "(", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ",", "numInputs", ")", ")", "nextOffsetCounts", "=", "np", ".", "zeros", "(", "dtype", "=", "'<u8'", ",", "shape", "=", "(", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ",", "numInputs", ")", ")", "binUpdates", "=", "[", "{", "}", "]", "*", "(", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ")", "bwtInd", "=", "0", "offsets", "=", "[", "0", "]", "*", "numInputs", "for", "x", "in", "xrange", "(", "0", ",", "currOffsetCounts", ".", "shape", "[", "0", "]", ")", ":", "#set, then change for next iter", "nextOffsetCounts", "[", "x", "]", "=", "offsets", "remaining", "=", "binSize", "while", "remaining", ">", "0", "and", "bwtInd", "<", "numInputs", ":", "if", "remaining", ">", "msbwts", "[", "bwtInd", "]", ".", "totalSize", "-", "offsets", "[", "bwtInd", "]", ":", "remaining", "-=", "msbwts", "[", "bwtInd", "]", ".", "totalSize", "-", "offsets", "[", "bwtInd", "]", "offsets", "[", "bwtInd", "]", "=", "msbwts", "[", "bwtInd", "]", ".", "totalSize", "bwtInd", "+=", "1", "else", ":", "offsets", "[", "bwtInd", "]", "+=", "remaining", "remaining", "=", "0", "ignored", "=", "0", "#original", "sys", ".", "stdout", ".", "write", "(", "'\\rcp '", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "del", "copiedPlaceArray", "needsMoreIterations", "=", "True", "i", "=", "0", "sameOffsetCount", "=", "0", "while", "needsMoreIterations", ":", "prevOffsetCounts", "=", "currOffsetCounts", "currOffsetCounts", "=", "nextOffsetCounts", "nextOffsetCounts", "=", "np", ".", "zeros", "(", "dtype", "=", "'<u8'", ",", "shape", "=", "(", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ",", "numInputs", ")", ")", "needsMoreIterations", "=", "False", "sameOffsetCount", "=", "0", "#this method uses a condensed byte and will ignore regions that are already finished", "sys", ".", "stdout", ".", "write", "(", "'\\rld '", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "ignored", "=", "0", "iteret", "=", "time", ".", "time", "(", ")", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "logger", ".", "info", "(", "'Finished iter '", "+", "str", "(", "i", ")", "+", "' in '", "+", "str", "(", "iteret", "-", "iterst", ")", "+", "'seconds'", ")", "iterst", "=", "time", ".", "time", "(", ")", "i", "+=", "1", "sys", ".", "stdout", ".", "write", "(", "'\\rld'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "#track which bins are actually different", "binHasChanged", "=", "nextBinHasChanged", "nextBinHasChanged", "=", "np", ".", "zeros", "(", "dtype", "=", 
"'b'", ",", "shape", "=", "(", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ")", ")", "tups", "=", "[", "]", "for", "x", "in", "xrange", "(", "0", ",", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ")", ":", "#check if the current offset matches the previous iteration offset", "sameOffset", "=", "np", ".", "array_equal", "(", "currOffsetCounts", "[", "x", "]", ",", "prevOffsetCounts", "[", "x", "]", ")", "if", "sameOffset", ":", "sameOffsetCount", "+=", "1", "'''\n TODO: the below False is there because this only works if you do a full file copy right now. It's\n because unless we copy, then the appropriate parts of the nextPlaceArray isn't properly updated. It's\n unclear whether one of these is better than the other in terms of performance. File copying is slow, but\n if only a couple sequences are similar then then skipping is good. I think in general, we only skip at the\n beginning for real data though, so I'm going with the no-skip, no-copy form until I can resolve the\n problem (if there's a resolution).\n '''", "if", "False", "and", "not", "binHasChanged", "[", "x", "]", "and", "sameOffset", ":", "for", "key", "in", "binUpdates", "[", "x", "]", ":", "nextOffsetCounts", "[", "key", "]", "+=", "binUpdates", "[", "x", "]", "[", "key", "]", "ignored", "+=", "1", "else", ":", "#note these are swapped depending on the iteration, saves time since there is no file copying", "if", "i", "%", "2", "==", "0", ":", "tup", "=", "(", "x", ",", "binSize", ",", "vcLen", ",", "currOffsetCounts", "[", "x", "]", ",", "mergedDir", "+", "'/temp.0.npy'", ",", "mergedDir", "+", "'/temp.1.npy'", ",", "inputBwtDirs", ")", "else", ":", "tup", "=", "(", "x", ",", "binSize", ",", "vcLen", ",", "currOffsetCounts", "[", "x", "]", ",", "mergedDir", "+", "'/temp.1.npy'", ",", "mergedDir", "+", "'/temp.0.npy'", ",", "inputBwtDirs", ")", "tups", ".", "append", "(", "tup", ")", "if", "numProcs", ">", "1", ":", "#TODO: tinker with chunksize, it might matter", "myPool", "=", "multiprocessing", ".", "Pool", "(", "numProcs", ")", "#myPool = multiprocessing.pool.ThreadPool(numProcs)", "rets", "=", "myPool", ".", "imap", "(", "mergeNewMSBWTPoolCall", ",", "tups", ",", "chunksize", "=", "10", ")", "else", ":", "rets", "=", "[", "]", "for", "tup", "in", "tups", ":", "rets", ".", "append", "(", "mergeNewMSBWTPoolCall", "(", "tup", ")", ")", "progressCounter", "=", "ignored", "sys", ".", "stdout", ".", "write", "(", "'\\r'", "+", "str", "(", "100", "*", "progressCounter", "*", "binSize", "/", "mergedBWT", ".", "shape", "[", "0", "]", ")", "+", "'%'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "for", "ret", "in", "rets", ":", "#iterate through the returns so we can figure out information necessary for continuation", "(", "x", ",", "nBHC", ",", "nOC", ",", "nMI", ")", "=", "ret", "binUpdates", "[", "x", "]", "=", "nOC", "for", "k", "in", "nBHC", ":", "nextBinHasChanged", "[", "k", "]", "|=", "nBHC", "[", "k", "]", "for", "b", "in", "nOC", ":", "nextOffsetCounts", "[", "b", "]", "+=", "nOC", "[", "b", "]", "needsMoreIterations", "|=", "nMI", "progressCounter", "+=", "1", "sys", ".", "stdout", ".", "write", "(", "'\\r'", "+", "str", "(", "min", "(", "100", "*", "progressCounter", "*", "binSize", "/", "mergedBWT", ".", "shape", "[", "0", "]", ",", "100", ")", ")", "+", "'%'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "nextOffsetCounts", "=", "np", ".", "cumsum", "(", "nextOffsetCounts", ",", "axis", "=", "0", ")", "-", "nextOffsetCounts", "if", "numProcs", ">", "1", ":", 
"myPool", ".", "terminate", "(", ")", "myPool", ".", "join", "(", ")", "myPool", "=", "None", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "logger", ".", "info", "(", "'Order solved, saving final array...'", ")", "#TODO: make this better", "offsets", "=", "np", ".", "zeros", "(", "dtype", "=", "'<u8'", ",", "shape", "=", "(", "numInputs", ",", ")", ")", "for", "i", "in", "xrange", "(", "0", ",", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", "+", "1", ")", ":", "ind", "=", "placeArray", "[", "i", "*", "binSize", ":", "(", "i", "+", "1", ")", "*", "binSize", "]", "if", "i", "==", "mergedBWT", ".", "shape", "[", "0", "]", "/", "binSize", ":", "ind", "=", "ind", "[", "0", ":", "mergedBWT", ".", "shape", "[", "0", "]", "-", "i", "*", "binSize", "]", "bc", "=", "np", ".", "bincount", "(", "ind", ",", "minlength", "=", "numInputs", ")", "for", "x", "in", "xrange", "(", "0", ",", "numInputs", ")", ":", "mergedBWT", "[", "np", ".", "add", "(", "i", "*", "binSize", ",", "np", ".", "where", "(", "ind", "==", "x", ")", ")", "]", "=", "msbwts", "[", "x", "]", ".", "getBWTRange", "(", "int", "(", "offsets", "[", "x", "]", ")", ",", "int", "(", "offsets", "[", "x", "]", "+", "bc", "[", "x", "]", ")", ")", "offsets", "+=", "bc", "et", "=", "time", ".", "time", "(", ")", "logger", ".", "info", "(", "'Finished all merge iterations in '", "+", "str", "(", "et", "-", "st", ")", "+", "' seconds.'", ")" ]
This function will take a list of input BWTs (compressed or not) and merge them into a single BWT @param mergedDir - the destination directory for the final merged MSBWT @param inputBwtDirs - the directories of the BWTs to merge @param numProcs - number of processes we're allowed to use @param logger - output goes here
[ "This", "function", "will", "take", "a", "list", "of", "input", "BWTs", "(", "compressed", "or", "not", ")", "and", "merge", "them", "into", "a", "single", "BWT" ]
python
train
42.387435
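The trickiest bookkeeping in `mergeNewMSBWT` is the initial partition loop (the `while remaining > 0` block), which walks the concatenated inputs in `binSize` chunks and records how far into each input BWT every bin starts. A self-contained sketch of just that logic with toy sizes (the function and variable names here are mine; the arithmetic mirrors the code above):

def initial_bin_offsets(total_sizes, bin_size):
    # One row per bin: the per-input offsets at which the bin starts.
    # Note the same quirk flagged in the TODO above: a total exactly
    # divisible by bin_size yields one extra (empty) trailing bin.
    num_bins = sum(total_sizes) // bin_size + 1
    offsets = [0] * len(total_sizes)
    per_bin = []
    bwt_ind = 0
    for _ in range(num_bins):
        per_bin.append(list(offsets))  # snapshot before consuming this bin
        remaining = bin_size
        while remaining > 0 and bwt_ind < len(total_sizes):
            avail = total_sizes[bwt_ind] - offsets[bwt_ind]
            if remaining > avail:
                remaining -= avail                      # exhaust this input...
                offsets[bwt_ind] = total_sizes[bwt_ind]
                bwt_ind += 1                            # ...and move to the next
            else:
                offsets[bwt_ind] += remaining
                remaining = 0
    return per_bin

# Two inputs of length 5 and 3, bins of 4 symbols:
print(initial_bin_offsets([5, 3], 4))  # [[0, 0], [4, 0], [5, 3]]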
matthewdeanmartin/jiggle_version
sample_projects/ver_in_weird_file/setup_helpers.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/sample_projects/ver_in_weird_file/setup_helpers.py#L125-L134
def long_description(*filenames):
    """Provide a long description."""
    res = ['']
    for filename in filenames:
        with open(filename) as fp:
            for line in fp:
                res.append(' ' + line)
            res.append('')
        res.append('\n')
    return EMPTYSTRING.join(res)
[ "def", "long_description", "(", "*", "filenames", ")", ":", "res", "=", "[", "''", "]", "for", "filename", "in", "filenames", ":", "with", "open", "(", "filename", ")", "as", "fp", ":", "for", "line", "in", "fp", ":", "res", ".", "append", "(", "' '", "+", "line", ")", "res", ".", "append", "(", "''", ")", "res", ".", "append", "(", "'\\n'", ")", "return", "EMPTYSTRING", ".", "join", "(", "res", ")" ]
Provide a long description.
[ "Provide", "a", "long", "description", "." ]
python
train
29.7
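A quick behavioural sketch of `long_description`: each file's lines are indented and files are separated by blank lines. `EMPTYSTRING` is defined elsewhere in setup_helpers.py; it is assumed here to be '':

import os
import tempfile

EMPTYSTRING = ''  # assumption: matches the module-level constant

# Write two small files and concatenate them as one long description.
paths = []
for text in ('First file.\n', 'Second file.\n'):
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as fp:
        fp.write(text)
    paths.append(path)

# With the long_description function above in scope:
print(long_description(*paths))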
log2timeline/plaso
plaso/multi_processing/psort.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/multi_processing/psort.py#L635-L648
def _StatusUpdateThreadMain(self):
  """Main function of the status update thread."""
  while self._status_update_active:
    # Make a local copy of the PIDs in case the dict is changed by
    # the main thread.
    for pid in list(self._process_information_per_pid.keys()):
      self._CheckStatusAnalysisProcess(pid)

    self._UpdateForemanProcessStatus()

    if self._status_update_callback:
      self._status_update_callback(self._processing_status)

    time.sleep(self._STATUS_UPDATE_INTERVAL)
[ "def", "_StatusUpdateThreadMain", "(", "self", ")", ":", "while", "self", ".", "_status_update_active", ":", "# Make a local copy of the PIDs in case the dict is changed by", "# the main thread.", "for", "pid", "in", "list", "(", "self", ".", "_process_information_per_pid", ".", "keys", "(", ")", ")", ":", "self", ".", "_CheckStatusAnalysisProcess", "(", "pid", ")", "self", ".", "_UpdateForemanProcessStatus", "(", ")", "if", "self", ".", "_status_update_callback", ":", "self", ".", "_status_update_callback", "(", "self", ".", "_processing_status", ")", "time", ".", "sleep", "(", "self", ".", "_STATUS_UPDATE_INTERVAL", ")" ]
Main function of the status update thread.
[ "Main", "function", "of", "the", "status", "update", "thread", "." ]
python
train
36.357143
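The `list(self._process_information_per_pid.keys())` copy above is load-bearing: iterating a dict that another thread resizes raises RuntimeError in Python 3. A standalone illustration of the pattern:

d = {1: 'a', 2: 'b'}

# Mutating while iterating the dict directly fails:
try:
    for k in d:
        d[k + 10] = 'new'  # simulates another thread inserting entries
except RuntimeError as exc:
    print(exc)  # dictionary changed size during iteration

# Iterating a snapshot of the keys is safe:
d = {1: 'a', 2: 'b'}
for k in list(d.keys()):
    d[k + 10] = 'new'
print(sorted(d))  # [1, 2, 11, 12]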
deepmind/sonnet
sonnet/python/modules/nets/mlp.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/nets/mlp.py#L237-L265
def transpose(self, name=None, activate_final=None):
  """Returns transposed `MLP`.

  Args:
    name: Optional string specifying the name of the transposed module. The
      default name is constructed by appending "_transpose"
      to `self.module_name`.
    activate_final: Optional boolean determining if the activation and batch
      normalization, if turned on, are applied to the final layer.

  Returns:
    Matching transposed `MLP` module.
  """
  if name is None:
    name = self.module_name + "_transpose"
  if activate_final is None:
    activate_final = self.activate_final
  output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]
  output_sizes.reverse()
  return MLP(
      name=name,
      output_sizes=output_sizes,
      activation=self.activation,
      activate_final=activate_final,
      initializers=self.initializers,
      partitioners=self.partitioners,
      regularizers=self.regularizers,
      use_bias=self.use_bias,
      use_dropout=self.use_dropout)
[ "def", "transpose", "(", "self", ",", "name", "=", "None", ",", "activate_final", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "module_name", "+", "\"_transpose\"", "if", "activate_final", "is", "None", ":", "activate_final", "=", "self", ".", "activate_final", "output_sizes", "=", "[", "lambda", "l", "=", "layer", ":", "l", ".", "input_shape", "[", "1", "]", "for", "layer", "in", "self", ".", "_layers", "]", "output_sizes", ".", "reverse", "(", ")", "return", "MLP", "(", "name", "=", "name", ",", "output_sizes", "=", "output_sizes", ",", "activation", "=", "self", ".", "activation", ",", "activate_final", "=", "activate_final", ",", "initializers", "=", "self", ".", "initializers", ",", "partitioners", "=", "self", ".", "partitioners", ",", "regularizers", "=", "self", ".", "regularizers", ",", "use_bias", "=", "self", ".", "use_bias", ",", "use_dropout", "=", "self", ".", "use_dropout", ")" ]
Returns transposed `MLP`. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. Returns: Matching transposed `MLP` module.
[ "Returns", "transposed", "MLP", "." ]
python
train
35.551724
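The `lambda l=layer:` default argument in `transpose` is deliberate: a plain `lambda: layer.input_shape[1]` would late-bind `layer`, so every element of `output_sizes` would report the last layer's shape. A standalone illustration of the closure pitfall:

layers = ['a', 'b', 'c']

late = [lambda: layer for layer in layers]       # all close over one variable
bound = [lambda l=layer: l for layer in layers]  # default arg captures each value

print([f() for f in late])   # ['c', 'c', 'c']
print([f() for f in bound])  # ['a', 'b', 'c']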
yougov/mongo-connector
mongo_connector/connector.py
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L475-L480
def oplog_thread_join(self):
    """Stops all the OplogThreads
    """
    LOG.info("MongoConnector: Stopping all OplogThreads")
    for thread in self.shard_set.values():
        thread.join()
[ "def", "oplog_thread_join", "(", "self", ")", ":", "LOG", ".", "info", "(", "\"MongoConnector: Stopping all OplogThreads\"", ")", "for", "thread", "in", "self", ".", "shard_set", ".", "values", "(", ")", ":", "thread", ".", "join", "(", ")" ]
Stops all the OplogThreads
[ "Stops", "all", "the", "OplogThreads" ]
python
train
34.666667
tensorflow/tensorboard
tensorboard/plugins/debugger/tensor_store.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/tensor_store.py#L200-L257
def query(self, watch_key, time_indices=None, slicing=None, mapping=None):
  """Query tensor store for a given watch_key.

  Args:
    watch_key: The watch key to query.
    time_indices: A numpy-style slicing string for time indices. E.g.,
      `-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.
    slicing: A numpy-style slicing string for individual time steps.
    mapping: A mapping string or a list of them. Supported mappings:
      `{None, 'image/png', 'health-pill'}`.

  Returns:
    The potentially sliced values as a nested list of values or its mapped
    format. A `list` of nested `list` of values.

  Raises:
    ValueError: If the shape of the sliced array is incompatible with mapping
      mode. Or if the mapping type is invalid.
  """
  if watch_key not in self._tensor_data:
    raise KeyError("watch_key not found: %s" % watch_key)

  if time_indices is None:
    time_indices = '-1'
  time_slicing = tensor_helper.parse_time_indices(time_indices)
  all_time_indices = list(range(self._tensor_data[watch_key].num_total()))
  sliced_time_indices = all_time_indices[time_slicing]
  if not isinstance(sliced_time_indices, list):
    sliced_time_indices = [sliced_time_indices]

  recombine_and_map = False
  step_mapping = mapping
  if len(sliced_time_indices) > 1 and mapping not in (None, ):
    recombine_and_map = True
    step_mapping = None

  output = []
  for index in sliced_time_indices:
    value = self._tensor_data[watch_key].query(index)[0]
    if (value is not None and
        not isinstance(value, debug_data.InconvertibleTensorProto)):
      output.append(tensor_helper.array_view(
          value, slicing=slicing, mapping=step_mapping)[2])
    else:
      output.append(None)
  if recombine_and_map:
    if mapping == 'image/png':
      output = tensor_helper.array_to_base64_png(output)
    elif mapping and mapping != 'none':
      logger.warn(
          'Unsupported mapping mode after recombining time steps: %s',
          mapping)
  return output
[ "def", "query", "(", "self", ",", "watch_key", ",", "time_indices", "=", "None", ",", "slicing", "=", "None", ",", "mapping", "=", "None", ")", ":", "if", "watch_key", "not", "in", "self", ".", "_tensor_data", ":", "raise", "KeyError", "(", "\"watch_key not found: %s\"", "%", "watch_key", ")", "if", "time_indices", "is", "None", ":", "time_indices", "=", "'-1'", "time_slicing", "=", "tensor_helper", ".", "parse_time_indices", "(", "time_indices", ")", "all_time_indices", "=", "list", "(", "range", "(", "self", ".", "_tensor_data", "[", "watch_key", "]", ".", "num_total", "(", ")", ")", ")", "sliced_time_indices", "=", "all_time_indices", "[", "time_slicing", "]", "if", "not", "isinstance", "(", "sliced_time_indices", ",", "list", ")", ":", "sliced_time_indices", "=", "[", "sliced_time_indices", "]", "recombine_and_map", "=", "False", "step_mapping", "=", "mapping", "if", "len", "(", "sliced_time_indices", ")", ">", "1", "and", "mapping", "not", "in", "(", "None", ",", ")", ":", "recombine_and_map", "=", "True", "step_mapping", "=", "None", "output", "=", "[", "]", "for", "index", "in", "sliced_time_indices", ":", "value", "=", "self", ".", "_tensor_data", "[", "watch_key", "]", ".", "query", "(", "index", ")", "[", "0", "]", "if", "(", "value", "is", "not", "None", "and", "not", "isinstance", "(", "value", ",", "debug_data", ".", "InconvertibleTensorProto", ")", ")", ":", "output", ".", "append", "(", "tensor_helper", ".", "array_view", "(", "value", ",", "slicing", "=", "slicing", ",", "mapping", "=", "step_mapping", ")", "[", "2", "]", ")", "else", ":", "output", ".", "append", "(", "None", ")", "if", "recombine_and_map", ":", "if", "mapping", "==", "'image/png'", ":", "output", "=", "tensor_helper", ".", "array_to_base64_png", "(", "output", ")", "elif", "mapping", "and", "mapping", "!=", "'none'", ":", "logger", ".", "warn", "(", "'Unsupported mapping mode after recomining time steps: %s'", ",", "mapping", ")", "return", "output" ]
Query tensor store for a given watch_key. Args: watch_key: The watch key to query. time_indices: A numpy-style slicing string for time indices. E.g., `-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1. slicing: A numpy-style slicing string for individual time steps. mapping: A mapping string or a list of them. Supported mappings: `{None, 'image/png', 'health-pill'}`. Returns: The potentially sliced values as a nested list of values or its mapped format. A `list` of nested `list` of values. Raises: ValueError: If the shape of the sliced array is incompatible with mapping mode. Or if the mapping type is invalid.
[ "Query", "tensor", "store", "for", "a", "given", "watch_key", "." ]
python
train
36.034483
basho/riak-python-client
riak/client/operations.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L807-L845
def stream_mapred(self, inputs, query, timeout):
    """
    Streams a MapReduce query as (phase, data) pairs. This is a
    generator method which should be iterated over.

    The caller should explicitly close the returned iterator,
    either using :func:`contextlib.closing` or calling ``close()``
    explicitly. Consuming the entire iterator will also close the
    stream. If it does not, the associated connection might not be
    returned to the pool. Example::

        from contextlib import closing

        # Using contextlib.closing
        with closing(mymapred.stream()) as results:
            for phase, result in results:
                do_something(phase, result)

        # Explicit close()
        stream = mymapred.stream()
        for phase, result in stream:
            do_something(phase, result)
        stream.close()

    :param inputs: the input list/structure
    :type inputs: list, dict
    :param query: the list of query phases
    :type query: list
    :param timeout: the query timeout
    :type timeout: integer, None
    :rtype: iterator
    """
    _validate_timeout(timeout)

    def make_op(transport):
        return transport.stream_mapred(inputs, query, timeout)

    for phase, data in self._stream_with_retry(make_op):
        yield phase, data
[ "def", "stream_mapred", "(", "self", ",", "inputs", ",", "query", ",", "timeout", ")", ":", "_validate_timeout", "(", "timeout", ")", "def", "make_op", "(", "transport", ")", ":", "return", "transport", ".", "stream_mapred", "(", "inputs", ",", "query", ",", "timeout", ")", "for", "phase", ",", "data", "in", "self", ".", "_stream_with_retry", "(", "make_op", ")", ":", "yield", "phase", ",", "data" ]
Streams a MapReduce query as (phase, data) pairs. This is a generator method which should be iterated over. The caller should explicitly close the returned iterator, either using :func:`contextlib.closing` or calling ``close()`` explicitly. Consuming the entire iterator will also close the stream. If it does not, the associated connection might not be returned to the pool. Example:: from contextlib import closing # Using contextlib.closing with closing(mymapred.stream()) as results: for phase, result in results: do_something(phase, result) # Explicit close() stream = mymapred.stream() for phase, result in stream: do_something(phase, result) stream.close() :param inputs: the input list/structure :type inputs: list, dict :param query: the list of query phases :type query: list :param timeout: the query timeout :type timeout: integer, None :rtype: iterator
[ "Streams", "a", "MapReduce", "query", "as", "(", "phase", "data", ")", "pairs", ".", "This", "is", "a", "generator", "method", "which", "should", "be", "iterated", "over", "." ]
python
train
35.410256
pandas-profiling/pandas-profiling
pandas_profiling/base.py
https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/base.py#L63-L123
def get_vartype(data):
    """Infer the type of a variable (technically a Series).

    The types supported are split in standard types and special types.

    Standard types:
        * Categorical (`TYPE_CAT`): the default type if no other one can be determined
        * Numerical (`TYPE_NUM`): if it contains numbers
        * Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo
        * Date (`TYPE_DATE`): if it contains datetime

    Special types:
        * Constant (`S_TYPE_CONST`): if all values in the variable are equal
        * Unique (`S_TYPE_UNIQUE`): if all values in the variable are different
        * Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported

    The result is cached by column name in a global variable to avoid recomputing.

    Parameters
    ----------
    data : Series
        The data type of the Series.

    Returns
    -------
    str
        The data type of the Series.

    Notes
    ----
    * Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field or just a boolean with NaN values
    * #72: Numeric with low Distinct count should be treated as "Categorical"
    """
    if data.name is not None and data.name in _MEMO:
        return _MEMO[data.name]

    vartype = None
    try:
        distinct_count = get_groupby_statistic(data)[1]
        leng = len(data)

        if distinct_count <= 1:
            vartype = S_TYPE_CONST
        elif pd.api.types.is_bool_dtype(data) or (distinct_count == 2 and pd.api.types.is_numeric_dtype(data)):
            vartype = TYPE_BOOL
        elif pd.api.types.is_numeric_dtype(data):
            vartype = TYPE_NUM
        elif pd.api.types.is_datetime64_dtype(data):
            vartype = TYPE_DATE
        elif distinct_count == leng:
            vartype = S_TYPE_UNIQUE
        else:
            vartype = TYPE_CAT
    except:
        vartype = S_TYPE_UNSUPPORTED

    if data.name is not None:
        _MEMO[data.name] = vartype

    return vartype
[ "def", "get_vartype", "(", "data", ")", ":", "if", "data", ".", "name", "is", "not", "None", "and", "data", ".", "name", "in", "_MEMO", ":", "return", "_MEMO", "[", "data", ".", "name", "]", "vartype", "=", "None", "try", ":", "distinct_count", "=", "get_groupby_statistic", "(", "data", ")", "[", "1", "]", "leng", "=", "len", "(", "data", ")", "if", "distinct_count", "<=", "1", ":", "vartype", "=", "S_TYPE_CONST", "elif", "pd", ".", "api", ".", "types", ".", "is_bool_dtype", "(", "data", ")", "or", "(", "distinct_count", "==", "2", "and", "pd", ".", "api", ".", "types", ".", "is_numeric_dtype", "(", "data", ")", ")", ":", "vartype", "=", "TYPE_BOOL", "elif", "pd", ".", "api", ".", "types", ".", "is_numeric_dtype", "(", "data", ")", ":", "vartype", "=", "TYPE_NUM", "elif", "pd", ".", "api", ".", "types", ".", "is_datetime64_dtype", "(", "data", ")", ":", "vartype", "=", "TYPE_DATE", "elif", "distinct_count", "==", "leng", ":", "vartype", "=", "S_TYPE_UNIQUE", "else", ":", "vartype", "=", "TYPE_CAT", "except", ":", "vartype", "=", "S_TYPE_UNSUPPORTED", "if", "data", ".", "name", "is", "not", "None", ":", "_MEMO", "[", "data", ".", "name", "]", "=", "vartype", "return", "vartype" ]
Infer the type of a variable (technically a Series). The types supported are split in standard types and special types. Standard types: * Categorical (`TYPE_CAT`): the default type if no other one can be determined * Numerical (`TYPE_NUM`): if it contains numbers * Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo * Date (`TYPE_DATE`): if it contains datetime Special types: * Constant (`S_TYPE_CONST`): if all values in the variable are equal * Unique (`S_TYPE_UNIQUE`): if all values in the variable are different * Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported The result is cached by column name in a global variable to avoid recomputing. Parameters ---------- data : Series The data type of the Series. Returns ------- str The data type of the Series. Notes ---- * Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field or just a boolean with NaN values * #72: Numeric with low Distinct count should be treated as "Categorical"
[ "Infer", "the", "type", "of", "a", "variable", "(", "technically", "a", "Series", ")", "." ]
python
train
32.885246
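The branch order in `get_vartype` matters: the boolean test runs before the generic numeric test, and a two-valued numeric column is classified as boolean via the `distinct_count == 2` clause. A small demonstration of the pandas predicates it relies on:

import pandas as pd

s_bool = pd.Series([True, False, True])
s_binary = pd.Series([0, 1, 1, 0])

print(pd.api.types.is_bool_dtype(s_bool))       # True
print(pd.api.types.is_bool_dtype(s_binary))     # False; the distinct_count == 2
                                                # clause catches it instead
print(pd.api.types.is_numeric_dtype(s_binary))  # True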
kstaniek/condoor
condoor/protocols/telnet.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/protocols/telnet.py#L100-L108
def disconnect(self, device):
    """Disconnect using protocol specific method."""
    # self.device.ctrl.sendcontrol(']')
    # self.device.ctrl.sendline('quit')
    self.log("TELNET disconnect")
    try:
        self.device.ctrl.send(chr(4))
    except OSError:
        self.log("Protocol already disconnected")
[ "def", "disconnect", "(", "self", ",", "device", ")", ":", "# self.device.ctrl.sendcontrol(']')", "# self.device.ctrl.sendline('quit')", "self", ".", "log", "(", "\"TELNET disconnect\"", ")", "try", ":", "self", ".", "device", ".", "ctrl", ".", "send", "(", "chr", "(", "4", ")", ")", "except", "OSError", ":", "self", ".", "log", "(", "\"Protocol already disconnected\"", ")" ]
Disconnect using protocol specific method.
[ "Disconnect", "using", "protocol", "specific", "method", "." ]
python
train
37.444444
thieman/dagobah
dagobah/core/core.py
https://github.com/thieman/dagobah/blob/e624180c2291034960302c9e0b818b65b5a7ee11/dagobah/core/core.py#L926-L936
def kill(self):
    """ Send SIGKILL to the task's process. """
    logger.info('Sending SIGKILL to task {0}'.format(self.name))

    if hasattr(self, 'remote_client') and self.remote_client is not None:
        self.kill_sent = True
        self.remote_client.close()
        return

    if not self.process:
        raise DagobahError('task does not have a running process')

    self.kill_sent = True
    self.process.kill()
[ "def", "kill", "(", "self", ")", ":", "logger", ".", "info", "(", "'Sending SIGKILL to task {0}'", ".", "format", "(", "self", ".", "name", ")", ")", "if", "hasattr", "(", "self", ",", "'remote_client'", ")", "and", "self", ".", "remote_client", "is", "not", "None", ":", "self", ".", "kill_sent", "=", "True", "self", ".", "remote_client", ".", "close", "(", ")", "return", "if", "not", "self", ".", "process", ":", "raise", "DagobahError", "(", "'task does not have a running process'", ")", "self", ".", "kill_sent", "=", "True", "self", ".", "process", ".", "kill", "(", ")" ]
Send SIGKILL to the task's process.
[ "Send", "SIGKILL", "to", "the", "task", "s", "process", "." ]
python
train
41.272727
nvbn/thefuck
thefuck/output_readers/rerun.py
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/output_readers/rerun.py#L23-L42
def _wait_output(popen, is_slow):
    """Returns `True` if we can get output of the command in the
    `settings.wait_command` time.

    Command will be killed if it wasn't finished in the time.

    :type popen: Popen
    :rtype: bool

    """
    proc = Process(popen.pid)
    try:
        proc.wait(settings.wait_slow_command if is_slow
                  else settings.wait_command)
        return True
    except TimeoutExpired:
        for child in proc.children(recursive=True):
            _kill_process(child)
        _kill_process(proc)
        return False
[ "def", "_wait_output", "(", "popen", ",", "is_slow", ")", ":", "proc", "=", "Process", "(", "popen", ".", "pid", ")", "try", ":", "proc", ".", "wait", "(", "settings", ".", "wait_slow_command", "if", "is_slow", "else", "settings", ".", "wait_command", ")", "return", "True", "except", "TimeoutExpired", ":", "for", "child", "in", "proc", ".", "children", "(", "recursive", "=", "True", ")", ":", "_kill_process", "(", "child", ")", "_kill_process", "(", "proc", ")", "return", "False" ]
Returns `True` if we can get output of the command in the `settings.wait_command` time. Command will be killed if it wasn't finished in the time. :type popen: Popen :rtype: bool
[ "Returns", "True", "if", "we", "can", "get", "output", "of", "the", "command", "in", "the", "settings", ".", "wait_command", "time", "." ]
python
train
27.4
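A self-contained sketch of the same psutil pattern (bounded wait, then kill the whole process tree) outside thefuck's settings object; assumes a Unix `sleep` binary is available:

import subprocess
import psutil  # Process and TimeoutExpired above come from psutil

popen = subprocess.Popen(['sleep', '10'])
proc = psutil.Process(popen.pid)
try:
    proc.wait(timeout=1)
    print('finished in time')
except psutil.TimeoutExpired:
    # Kill children first so nothing is orphaned, then the parent.
    for child in proc.children(recursive=True):
        child.kill()
    proc.kill()
    print('killed after timeout')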
lexruee/pi-switch-python
send.py
https://github.com/lexruee/pi-switch-python/blob/5c367a6d51aa15811e997160746d1512a37e2dc6/send.py#L32-L71
def create_switch(type, settings, pin):
    """Create a switch.

    Args:
        type: (str): type of the switch [A,B,C,D]
        settings (str): a comma separated list
        pin (int): wiringPi pin

    Returns:
        switch
    """
    switch = None
    if type == "A":
        group, device = settings.split(",")
        switch = pi_switch.RCSwitchA(group, device)

    elif type == "B":
        addr, channel = settings.split(",")
        addr = int(addr)
        channel = int(channel)
        switch = pi_switch.RCSwitchB(addr, channel)

    elif type == "C":
        family, group, device = settings.split(",")
        group = int(group)
        device = int(device)
        switch = pi_switch.RCSwitchC(family, group, device)

    elif type == "D":
        group, device = settings.split(",")
        device = int(device)
        switch = pi_switch.RCSwitchD(group, device)

    else:
        print "Type %s is not supported!" % type
        sys.exit()

    switch.enableTransmit(pin)
    return switch
[ "def", "create_switch", "(", "type", ",", "settings", ",", "pin", ")", ":", "switch", "=", "None", "if", "type", "==", "\"A\"", ":", "group", ",", "device", "=", "settings", ".", "split", "(", "\",\"", ")", "switch", "=", "pi_switch", ".", "RCSwitchA", "(", "group", ",", "device", ")", "elif", "type", "==", "\"B\"", ":", "addr", ",", "channel", "=", "settings", ".", "split", "(", "\",\"", ")", "addr", "=", "int", "(", "addr", ")", "channel", "=", "int", "(", "channel", ")", "switch", "=", "pi_switch", ".", "RCSwitchB", "(", "addr", ",", "channel", ")", "elif", "type", "==", "\"C\"", ":", "family", ",", "group", ",", "device", "=", "settings", ".", "split", "(", "\",\"", ")", "group", "=", "int", "(", "group", ")", "device", "=", "int", "(", "device", ")", "switch", "=", "pi_switch", ".", "RCSwitchC", "(", "family", ",", "group", ",", "device", ")", "elif", "type", "==", "\"D\"", ":", "group", ",", "device", "=", "settings", ".", "split", "(", "\",\"", ")", "device", "=", "int", "(", "device", ")", "switch", "=", "pi_switch", ".", "RCSwitchD", "(", "group", ",", "device", ")", "else", ":", "print", "\"Type %s is not supported!\"", "%", "type", "sys", ".", "exit", "(", ")", "switch", ".", "enableTransmit", "(", "pin", ")", "return", "switch" ]
Create a switch. Args: type: (str): type of the switch [A,B,C,D] settings (str): a comma separated list pin (int): wiringPi pin Returns: switch
[ "Create", "a", "switch", "." ]
python
train
21.525
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/extensions_v1beta1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/extensions_v1beta1_api.py#L1375-L1404
def delete_collection_namespaced_replica_set(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_replica_set  # noqa: E501

    delete collection of ReplicaSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs)  # noqa: E501
        return data
[ "def", "delete_collection_namespaced_replica_set", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_collection_namespaced_replica_set_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "delete_collection_namespaced_replica_set_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
delete_collection_namespaced_replica_set # noqa: E501 delete collection of ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete_collection_namespaced_replica_set", "#", "noqa", ":", "E501" ]
python
train
164.4
pkgw/pwkit
pwkit/kwargv.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/kwargv.py#L464-L475
def parse_or_die(self, args=None):
    """Like :meth:`ParseKeywords.parse`, but calls :func:`pwkit.cli.die` if a
    :exc:`KwargvError` is raised, printing the exception text. Returns *self*
    for convenience.

    """
    from .cli import die

    try:
        return self.parse(args)
    except KwargvError as e:
        die(e)
[ "def", "parse_or_die", "(", "self", ",", "args", "=", "None", ")", ":", "from", ".", "cli", "import", "die", "try", ":", "return", "self", ".", "parse", "(", "args", ")", "except", "KwargvError", "as", "e", ":", "die", "(", "e", ")" ]
Like :meth:`ParseKeywords.parse`, but calls :func:`pwkit.cli.die` if a :exc:`KwargvError` is raised, printing the exception text. Returns *self* for convenience.
[ "Like", ":", "meth", ":", "ParseKeywords", ".", "parse", "but", "calls", ":", "func", ":", "pkwit", ".", "cli", ".", "die", "if", "a", ":", "exc", ":", "KwargvError", "is", "raised", "printing", "the", "exception", "text", ".", "Returns", "*", "self", "*", "for", "convenience", "." ]
python
train
29.666667
yero13/na3x
na3x/transformation/transformer.py
https://github.com/yero13/na3x/blob/b31ef801ea574081125020a7d0f9c4242f8f8b02/na3x/transformation/transformer.py#L305-L329
def copy(input, **params):
    """
    Copies input or input's selected fields
    :param input:
    :param params:
    :return: input
    """
    PARAM_FIELDS = 'fields'

    def filter_fields(obj, fields):
        return {k: v for k, v in obj.items() if k in fields}

    if PARAM_FIELDS in params:
        fields = params.get(PARAM_FIELDS)
        if isinstance(input, list):
            res = []
            for row in input:
                res.append(filter_fields(row, fields))
            return res
        elif isinstance(input, dict):
            return filter_fields(input, fields)
        else:
            raise NotImplementedError('{} is not supported'.format(type(input)))
    else:
        return input
[ "def", "copy", "(", "input", ",", "*", "*", "params", ")", ":", "PARAM_FIELDS", "=", "'fields'", "def", "filter_fields", "(", "obj", ",", "fields", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", "if", "k", "in", "fields", "}", "if", "PARAM_FIELDS", "in", "params", ":", "fields", "=", "params", ".", "get", "(", "PARAM_FIELDS", ")", "if", "isinstance", "(", "input", ",", "list", ")", ":", "res", "=", "[", "]", "for", "row", "in", "input", ":", "res", ".", "append", "(", "filter_fields", "(", "row", ",", "fields", ")", ")", "return", "res", "elif", "isinstance", "(", "input", ",", "dict", ")", ":", "return", "filter_fields", "(", "input", ",", "fields", ")", "else", ":", "raise", "NotImplementedError", "(", "'{} is not supported'", ".", "format", "(", "type", "(", "input", ")", ")", ")", "else", ":", "return", "input" ]
Copies input or input's selected fields
:param input:
:param params:
:return: input
[ "Copies", "input", "or", "input", "s", "selected", "fields", ":", "param", "input", ":", ":", "param", "params", ":", ":", "return", ":", "input" ]
python
train
27.72
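
A quick check of the filtering behavior, using only what the record shows. Note one caveat visible in the code: without a fields parameter the function returns the input object itself, not a copy.

row = {'id': 1, 'key': 'JIRA-42', 'secret': 'x'}
print(copy(row, fields=['id', 'key']))    # {'id': 1, 'key': 'JIRA-42'}
print(copy([row, row], fields=['key']))   # [{'key': 'JIRA-42'}, {'key': 'JIRA-42'}]
print(copy(row) is row)                   # True: no fields means pass-through
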
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L19817-L19847
def fs_obj_query_info(self, path, follow_symlinks):
    """Queries information about a file system object (file, directory, etc)
    in the guest.

    in path of type str
        Path to the file system object to gather information about.
        Guest path style.

    in follow_symlinks of type bool
        Information about symbolic links is returned if @c false.  Otherwise,
        symbolic links are followed and the returned information concerns
        itself with the symlink target if @c true.

    return info of type :class:`IGuestFsObjInfo`
        :py:class:`IGuestFsObjInfo` object containing the information.

    raises :class:`VBoxErrorObjectNotFound`
        The file system object was not found.

    raises :class:`VBoxErrorIprtError`
        Error while querying information.

    """
    if not isinstance(path, basestring):
        raise TypeError("path can only be an instance of type basestring")
    if not isinstance(follow_symlinks, bool):
        raise TypeError("follow_symlinks can only be an instance of type bool")
    info = self._call("fsObjQueryInfo",
                      in_p=[path, follow_symlinks])
    info = IGuestFsObjInfo(info)
    return info
[ "def", "fs_obj_query_info", "(", "self", ",", "path", ",", "follow_symlinks", ")", ":", "if", "not", "isinstance", "(", "path", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"path can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "follow_symlinks", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"follow_symlinks can only be an instance of type bool\"", ")", "info", "=", "self", ".", "_call", "(", "\"fsObjQueryInfo\"", ",", "in_p", "=", "[", "path", ",", "follow_symlinks", "]", ")", "info", "=", "IGuestFsObjInfo", "(", "info", ")", "return", "info" ]
Queries information about a file system object (file, directory, etc)
in the guest.

in path of type str
    Path to the file system object to gather information about.
    Guest path style.

in follow_symlinks of type bool
    Information about symbolic links is returned if @c false.  Otherwise,
    symbolic links are followed and the returned information concerns
    itself with the symlink target if @c true.

return info of type :class:`IGuestFsObjInfo`
    :py:class:`IGuestFsObjInfo` object containing the information.

raises :class:`VBoxErrorObjectNotFound`
    The file system object was not found.

raises :class:`VBoxErrorIprtError`
    Error while querying information.
[ "Queries", "information", "about", "a", "file", "system", "object", "(", "file", "directory", "etc", ")", "in", "the", "guest", "." ]
python
train
40.935484
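
A minimal calling sketch, assuming a virtualbox-python guest session has already been established. The machine name, credentials, and guest path below are placeholders, not values from the record.

import virtualbox

vbox = virtualbox.VirtualBox()
session = vbox.find_machine('my-vm').create_session()   # hypothetical VM name
guest = session.console.guest.create_session('user', 'password')

info = guest.fs_obj_query_info('/etc/hostname', follow_symlinks=True)
print(info.object_size)   # one IGuestFsObjInfo attribute among several
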
metglobal/django-exchange
exchange/conversion.py
https://github.com/metglobal/django-exchange/blob/2133593885e02f42a4ed2ed4be2763c4777a1245/exchange/conversion.py#L36-L52
def convert_values(args_list):
    """convert_value in bulk.

    :param args_list: list of value, source, target currency pairs
    :return: map of converted values
    """
    rate_map = get_rates(map(itemgetter(1, 2), args_list))
    value_map = {}
    for value, source, target in args_list:
        args = (value, source, target)
        if source == target:
            value_map[args] = value
        else:
            value_map[args] = value * rate_map[(source, target)]
    return value_map
[ "def", "convert_values", "(", "args_list", ")", ":", "rate_map", "=", "get_rates", "(", "map", "(", "itemgetter", "(", "1", ",", "2", ")", ",", "args_list", ")", ")", "value_map", "=", "{", "}", "for", "value", ",", "source", ",", "target", "in", "args_list", ":", "args", "=", "(", "value", ",", "source", ",", "target", ")", "if", "source", "==", "target", ":", "value_map", "[", "args", "]", "=", "value", "else", ":", "value_map", "[", "args", "]", "=", "value", "*", "rate_map", "[", "(", "source", ",", "target", ")", "]", "return", "value_map" ]
convert_value in bulk.

:param args_list: list of value, source, target currency pairs
:return: map of converted values
[ "convert_value", "in", "bulk", "." ]
python
train
28.529412
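
An invocation sketch grounded in the code above; the amounts and currency codes are invented, and populated exchange rates are assumed. Rates for all distinct (source, target) pairs are fetched once via get_rates, then applied per item.

from decimal import Decimal

args_list = [
    (Decimal('10.00'), 'USD', 'EUR'),
    (Decimal('25.50'), 'USD', 'EUR'),  # reuses the same USD->EUR rate
    (Decimal('7.00'), 'GBP', 'GBP'),   # same currency, returned unchanged
]
value_map = convert_values(args_list)
print(value_map[(Decimal('7.00'), 'GBP', 'GBP')])  # Decimal('7.00')
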
TeamHG-Memex/eli5
eli5/formatters/as_dataframe.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/formatters/as_dataframe.py#L26-L35
def explain_weights_dfs(estimator, **kwargs):
    # type: (...) -> Dict[str, pd.DataFrame]
    """ Explain weights and export them to a dict with ``pandas.DataFrame``
    values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
    All keyword arguments are passed to :func:`eli5.explain_weights`.
    Weights of all features are exported by default.
    """
    kwargs = _set_defaults(kwargs)
    return format_as_dataframes(
        eli5.explain_weights(estimator, **kwargs))
[ "def", "explain_weights_dfs", "(", "estimator", ",", "*", "*", "kwargs", ")", ":", "# type: (...) -> Dict[str, pd.DataFrame]", "kwargs", "=", "_set_defaults", "(", "kwargs", ")", "return", "format_as_dataframes", "(", "eli5", ".", "explain_weights", "(", "estimator", ",", "*", "*", "kwargs", ")", ")" ]
Explain weights and export them to a dict with ``pandas.DataFrame``
values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
All keyword arguments are passed to :func:`eli5.explain_weights`.
Weights of all features are exported by default.
[ "Explain", "weights", "and", "export", "them", "to", "a", "dict", "with", "pandas", ".", "DataFrame", "values", "(", "as", ":", "func", ":", "eli5", ".", "formatters", ".", "as_dataframe", ".", "format_as_dataframes", "does", ")", ".", "All", "keyword", "arguments", "are", "passed", "to", ":", "func", ":", "eli5", ".", "explain_weights", ".", "Weights", "of", "all", "features", "are", "exported", "by", "default", "." ]
python
train
48.7
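
A short sketch with a scikit-learn model; the dataset and estimator are illustrative choices, not from the record.

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

from eli5.formatters.as_dataframe import explain_weights_dfs

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=200).fit(X, y)

dfs = explain_weights_dfs(clf)  # dict of pandas DataFrames, e.g. {'targets': ...}
for name, df in dfs.items():
    print(name, df.columns.tolist())
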
PmagPy/PmagPy
pmagpy/ipmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L10216-L10540
def zeq_magic(meas_file='measurements.txt', spec_file='', crd='s', input_dir_path='.', angle=0,
              n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="",
              samp_file='samples.txt', contribution=None, fignum=1):
    """
    zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.

    Parameters
    ----------
    meas_file : str
        input measurement file
    spec_file : str
        input specimen interpretation file
    samp_file : str
        input sample orientations file
    crd : str
        coordinate system [s,g,t] for specimen, geographic, tilt corrected
        g,t options require a sample file with specimen and bedding orientation
    input_dir_path : str
        input directory of meas_file, default "."
    angle : float
        angle of X direction with respect to specimen X
    n_plots : int, default 5
        maximum number of plots to make
        if you want to make all possible plots, specify "all"
    save_plots : bool, default True
        if True, create and save all requested plots
    fmt : str, default "svg"
        format for figures, [svg, jpg, pdf, png]
    interactive : bool, default False
        interactively plot and display for each specimen
        (this is best used on the command line only)
    specimen : str, default ""
        specimen name to plot
    samp_file : str, default 'samples.txt'
        name of samples file
    contribution : cb.Contribution, default None
        if provided, use Contribution object instead of reading in
        data from files
    fignum : matplotlib figure number
    """
    def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock):
        if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock):
            return ZED
        if 'method_codes' not in spec_container.df.columns:
            return ZED
        prior_spec_data = spec_container.get_records_for_code(
            'LP-DIR', strict_match=False)  # look up all prior directional interpretations
        prior_specimen_interpretations=[]
        if not len(prior_spec_data):
            return ZED
        mpars = {"specimen_direction_type": "Error"}
        if len(prior_spec_data):
            prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str) == this_specimen]  #.str.match(this_specimen) == True]
        if len(prior_specimen_interpretations):
            if len(prior_specimen_interpretations)>0:
                beg_pcas = pd.to_numeric(
                    prior_specimen_interpretations.meas_step_min.values).tolist()
                end_pcas = pd.to_numeric(
                    prior_specimen_interpretations.meas_step_max.values).tolist()
                spec_methods = prior_specimen_interpretations.method_codes.tolist()
            # step through all prior interpretations and plot them
            for ind in range(len(beg_pcas)):
                spec_meths = spec_methods[ind].split(':')
                for m in spec_meths:
                    if 'DE-BFL' in m:
                        calculation_type = 'DE-BFL'  # best fit line
                    if 'DE-BFP' in m:
                        calculation_type = 'DE-BFP'  # best fit plane
                    if 'DE-FM' in m:
                        calculation_type = 'DE-FM'  # fisher mean
                    if 'DE-BFL-A' in m:
                        calculation_type = 'DE-BFL-A'  # anchored best fit line
                treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist()
                if len(beg_pcas)!=0:
                    try:
                        # getting the starting and ending points
                        start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind])
                        mpars = pmag.domean(
                            datablock, start, end, calculation_type)
                    except ValueError as ex:
                        mpars['specimen_direction_type'] = "Error"
                        try:
                            if beg_pcas[ind] == 0:
                                start = 0
                            else:
                                start = treatments.index(beg_pcas[ind])
                            if end_pcas[ind] == 0:
                                end = 0
                            else:
                                end = treatments.index(end_pcas[ind])
                            mpars = pmag.domean(
                                datablock, start, end, calculation_type)
                        except ValueError:
                            mpars['specimen_direction_type'] = "Error"
                # calculate direction/plane
                if mpars["specimen_direction_type"] != "Error":
                    # put it on the plot
                    pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
                    #if interactive:
                    #    pmagplotlib.draw_figs(ZED)
                else:
                    print('\n-W- Specimen {} record contains invalid start/stop bounds:'.format(this_specimen))
                    print(prior_spec_data.loc[this_specimen][['meas_step_min', 'meas_step_max']])
                    print('\n Measurement records:')
                    cols = list(set(['treat_ac_field', 'treat_temp']).intersection(this_specimen_measurements.columns))
                    print(this_specimen_measurements[cols])
                    print('\n Data will be plotted without interpretations\n')
        return ZED

    def make_plots(spec, cnt, meas_df, spec_container, samp_container=None):
        # get sample data for orientation
        if spec_container:
            try:
                samps = spec_container.df.loc[spec, 'sample']
            except KeyError:
                samps = ""
            samp_df = []
            if isinstance(samps, int) or isinstance(samps, float) or isinstance(samps, np.int64):
                if np.isnan(samps):
                    samp = ""
                    samp_df = []
                else:
                    samp = str(samps)
                    samp_container.df.index = samp_container.df.index.astype(str)
                    samp_df = samp_container.df[samp_container.df.index == samp]
            elif isinstance(samps, type(None)):
                samp = ""
                samp_df = []
            elif len(samps):
                if isinstance(samps, str):
                    samp = samps
                else:
                    samp = samps.iloc[0]
                samp_df = samp_container.df[samp_container.df.index == samp]
            else:
                samp_df = []
        # we can make the figure dictionary that pmagplotlib likes:
        ZED = {'eqarea': cnt, 'zijd': cnt+1, 'demag': cnt+2}
        # make datablock
        # get the relevant data
        spec_df = meas_df[meas_df.specimen == s]
        # remove ARM data
        spec_df = spec_df[- spec_df.method_codes.str.contains(
            'LP-*[\w]*-ARM')]
        # split data into NRM, thermal, and af dataframes
        spec_df_nrm = spec_df[spec_df.method_codes.str.contains(
            'LT-NO')]  # get the NRM data
        spec_df_th = spec_df[spec_df.method_codes.str.contains(
            'LT-T-Z')]  # zero field thermal demag steps
        try:
            cond = spec_df.method_codes.str.contains('(^|[\s\:])LT-PTRM')
            spec_df_th = spec_df_th[-cond]  # get rid of some pTRM steps
        except ValueError:
            keep_inds = []
            n = 0
            for ind, row in spec_df_th.copy().iterrows():
                if 'LT-PTRM' in row['method_codes'] and 'ALT-PTRM' not in row['method_codes']:
                    keep_inds.append(n)
                else:
                    pass
                n += 1
            if len(keep_inds) < n:
                spec_df_th = spec_df_th.iloc[keep_inds]
        spec_df_af = spec_df[spec_df.method_codes.str.contains('LT-AF-Z')]
        this_spec_meas_df = None
        datablock = None
        if (not len(spec_df_th.index) > 1) and (not len(spec_df_af.index) > 1):
            return
        if len(spec_df_th.index) > 1:
            # this is a thermal run
            this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th])
            # make sure all decs/incs are filled in
            n_rows = len(this_spec_meas_df)
            this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
            if n_rows > len(this_spec_meas_df):
                print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
            # geographic transformation
            if coord != "-1" and len(samp_df):
                this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
            units = 'K'  # units are kelvin
            try:
                this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
                this_spec_meas_df['treat_temp'] = this_spec_meas_df['treat_temp'].astype(float)
            except:
                print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
                return
            datablock = this_spec_meas_df[['treat_temp', 'dir_dec', 'dir_inc',
                                           'magn_moment', 'blank', 'quality']].values.tolist()
            ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
        if len(spec_df_af.index) > 1:
            # this is an af run
            this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af])
            # make sure all decs/incs are filled in
            n_rows = len(this_spec_meas_df)
            this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
            if n_rows > len(this_spec_meas_df):
                print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
            # geographic transformation
            if coord != "-1" and len(samp_df):
                this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
            units = 'T'  # these are AF data
            try:
                this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
                this_spec_meas_df['treat_ac_field'] = this_spec_meas_df['treat_ac_field'].astype(float)
            except:
                print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
                return
            datablock = this_spec_meas_df[['treat_ac_field', 'dir_dec', 'dir_inc',
                                           'magn_moment', 'blank', 'quality']].values.tolist()
            ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
        return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock)

    if interactive:
        save_plots = False
    # read in MagIC formatted data if contribution object not provided
    if not isinstance(contribution, cb.Contribution):
        input_dir_path = os.path.realpath(input_dir_path)
        file_path = pmag.resolve_file_name(meas_file, input_dir_path)
        # read in magic formatted data
        if not os.path.exists(file_path):
            print('No such file:', file_path)
            return False, []
        custom_filenames = {'measurements': file_path, 'specimens': spec_file, 'samples': samp_file}
        contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames,
                                       read_tables=['measurements', 'specimens',
                                                    'contribution', 'samples'])
    if pmagplotlib.isServer:
        try:
            contribution.propagate_location_to_samples()
            contribution.propagate_location_to_specimens()
            contribution.propagate_location_to_measurements()
        except KeyError as ex:
            pass
    meas_container = contribution.tables['measurements']
    meas_df = contribution.tables['measurements'].df
    #
    #meas_df=pd.read_csv(file_path, sep='\t', header=1)
    spec_container = contribution.tables.get('specimens', None)
    samp_container = contribution.tables.get('samples', None)
    #if not spec_file:
    #    spec_file = os.path.join(os.path.split(file_path)[0], "specimens.txt")
    #if os.path.exists(spec_file):
    #    spec_container = cb.MagicDataFrame(spec_file, dtype="specimens")
    #else:
    #    spec_container = None
    meas_df['blank'] = ""  # this is a dummy variable expected by plotZED
    if 'treat_ac_field' in meas_df.columns:
        # create 'treatment' column.
        # uses treat_temp if treat_ac_field is missing OR zero.
        # (have to take this into account for plotting later)
        if 'treat_temp' in meas_df.columns:
            meas_df['treatment'] = meas_df['treat_ac_field'].where(
                cond=meas_df['treat_ac_field'].astype(bool), other=meas_df['treat_temp'])
        else:
            meas_df['treatment'] = meas_df['treat_ac_field']
    else:
        meas_df['treatment'] = meas_df['treat_temp']
    if crd == "s":
        coord = "-1"
    elif crd == "t":
        coord = "100"
    else:
        coord = "0"
    specimens = meas_df.specimen.unique()  # list of specimen names
    if len(specimens) == 0:
        print('there are no data for plotting')
        return False, []
    # check measurement table for req'd fields
    missing = []
    reqd_cols_present = meas_df.columns.intersection(['dir_dec', 'dir_inc', 'magn_moment'])
    for col in ['dir_dec', 'dir_inc', 'magn_moment']:
        if col not in reqd_cols_present:
            missing.append(col)
    if missing:
        print('-W- Missing required column(s) {}, cannot run zeq_magic'.format(', '.join(missing)))
        return False, []
    cnt = fignum
    if n_plots != "all":
        if len(specimens) > n_plots:
            specimens = specimens[:n_plots]
    saved = []
    if specimen:
        specimens = [specimen]
    for s in specimens:
        ZED = make_plots(s, cnt, meas_df, spec_container, samp_container)
        if not ZED:
            if pmagplotlib.verbose:
                print('No plots could be created for specimen:', s)
            continue
        titles = {key: s + "_" + key + "." + fmt for key in ZED}
        if pmagplotlib.isServer:
            titles = {}
            titles['eqarea'] = 'Equal Area Plot'
            titles['zijd'] = 'Zijderveld Plot'
            titles['demag'] = 'Demagnetization Plot'
            con_id = ""
            if 'contribution' in contribution.tables:
                if 'id' in contribution.tables['contribution'].df.columns:
                    con_id = contribution.tables['contribution'].df['id'].values[0]
            pmagplotlib.add_borders(ZED, titles, con_id=con_id)
            for title in titles:
                # try to get the full hierarchy for plot names
                df_slice = meas_container.df[meas_container.df['specimen'] == s]
                location = str(meas_container.get_name('location', df_slice))
                site = str(meas_container.get_name('site', df_slice))
                sample = str(meas_container.get_name('sample', df_slice))
                # add coord here!
                filename = 'LO:_'+location+'_SI:_'+site+'_SA:_'+sample + \
                    '_SP:_'+str(s)+'_CO:_' + '_TY:_'+title+'_.png'
                titles[title] = filename
        if save_plots:
            saved.extend(pmagplotlib.save_plots(ZED, titles))
        elif interactive:
            pmagplotlib.draw_figs(ZED)
            ans = pmagplotlib.save_or_quit()
            if ans == 'a':
                saved.extend(pmagplotlib.save_plots(ZED, titles))
            else:
                continue
        else:
            cnt += 3
    return True, saved
[ "def", "zeq_magic", "(", "meas_file", "=", "'measurements.txt'", ",", "spec_file", "=", "''", ",", "crd", "=", "'s'", ",", "input_dir_path", "=", "'.'", ",", "angle", "=", "0", ",", "n_plots", "=", "5", ",", "save_plots", "=", "True", ",", "fmt", "=", "\"svg\"", ",", "interactive", "=", "False", ",", "specimen", "=", "\"\"", ",", "samp_file", "=", "'samples.txt'", ",", "contribution", "=", "None", ",", "fignum", "=", "1", ")", ":", "def", "plot_interpretations", "(", "ZED", ",", "spec_container", ",", "this_specimen", ",", "this_specimen_measurements", ",", "datablock", ")", ":", "if", "cb", ".", "is_null", "(", "spec_container", ")", "or", "cb", ".", "is_null", "(", "this_specimen_measurements", ")", "or", "cb", ".", "is_null", "(", "datablock", ")", ":", "return", "ZED", "if", "'method_codes'", "not", "in", "spec_container", ".", "df", ".", "columns", ":", "return", "ZED", "prior_spec_data", "=", "spec_container", ".", "get_records_for_code", "(", "'LP-DIR'", ",", "strict_match", "=", "False", ")", "# look up all prior directional interpretations", "prior_specimen_interpretations", "=", "[", "]", "if", "not", "len", "(", "prior_spec_data", ")", ":", "return", "ZED", "mpars", "=", "{", "\"specimen_direction_type\"", ":", "\"Error\"", "}", "if", "len", "(", "prior_spec_data", ")", ":", "prior_specimen_interpretations", "=", "prior_spec_data", "[", "prior_spec_data", "[", "'specimen'", "]", ".", "astype", "(", "str", ")", "==", "this_specimen", "]", "#.str.match(this_specimen) == True]", "if", "len", "(", "prior_specimen_interpretations", ")", ":", "if", "len", "(", "prior_specimen_interpretations", ")", ">", "0", ":", "beg_pcas", "=", "pd", ".", "to_numeric", "(", "prior_specimen_interpretations", ".", "meas_step_min", ".", "values", ")", ".", "tolist", "(", ")", "end_pcas", "=", "pd", ".", "to_numeric", "(", "prior_specimen_interpretations", ".", "meas_step_max", ".", "values", ")", ".", "tolist", "(", ")", "spec_methods", "=", "prior_specimen_interpretations", ".", "method_codes", ".", "tolist", "(", ")", "# step through all prior interpretations and plot them", "for", "ind", "in", "range", "(", "len", "(", "beg_pcas", ")", ")", ":", "spec_meths", "=", "spec_methods", "[", "ind", "]", ".", "split", "(", "':'", ")", "for", "m", "in", "spec_meths", ":", "if", "'DE-BFL'", "in", "m", ":", "calculation_type", "=", "'DE-BFL'", "# best fit line", "if", "'DE-BFP'", "in", "m", ":", "calculation_type", "=", "'DE-BFP'", "# best fit plane", "if", "'DE-FM'", "in", "m", ":", "calculation_type", "=", "'DE-FM'", "# fisher mean", "if", "'DE-BFL-A'", "in", "m", ":", "calculation_type", "=", "'DE-BFL-A'", "# anchored best fit line", "treatments", "=", "pd", ".", "to_numeric", "(", "this_specimen_measurements", ".", "treatment", ")", ".", "tolist", "(", ")", "if", "len", "(", "beg_pcas", ")", "!=", "0", ":", "try", ":", "# getting the starting and ending points", "start", ",", "end", "=", "treatments", ".", "index", "(", "beg_pcas", "[", "ind", "]", ")", ",", "treatments", ".", "index", "(", "end_pcas", "[", "ind", "]", ")", "mpars", "=", "pmag", ".", "domean", "(", "datablock", ",", "start", ",", "end", ",", "calculation_type", ")", "except", "ValueError", "as", "ex", ":", "mpars", "[", "'specimen_direction_type'", "]", "=", "\"Error\"", "try", ":", "if", "beg_pcas", "[", "ind", "]", "==", "0", ":", "start", "=", "0", "else", ":", "start", "=", "treatments", ".", "index", "(", "beg_pcas", "[", "ind", "]", ")", "if", "end_pcas", "[", "ind", "]", "==", "0", ":", "end", "=", "0", "else", ":", "end", "=", 
"treatments", ".", "index", "(", "end_pcas", "[", "ind", "]", ")", "mpars", "=", "pmag", ".", "domean", "(", "datablock", ",", "start", ",", "end", ",", "calculation_type", ")", "except", "ValueError", ":", "mpars", "[", "'specimen_direction_type'", "]", "=", "\"Error\"", "# calculate direction/plane", "if", "mpars", "[", "\"specimen_direction_type\"", "]", "!=", "\"Error\"", ":", "# put it on the plot", "pmagplotlib", ".", "plot_dir", "(", "ZED", ",", "mpars", ",", "datablock", ",", "angle", ")", "#if interactive:", "# pmagplotlib.draw_figs(ZED)", "else", ":", "print", "(", "'\\n-W- Specimen {} record contains invalid start/stop bounds:'", ".", "format", "(", "this_specimen", ")", ")", "print", "(", "prior_spec_data", ".", "loc", "[", "this_specimen", "]", "[", "[", "'meas_step_min'", ",", "'meas_step_max'", "]", "]", ")", "print", "(", "'\\n Measurement records:'", ")", "cols", "=", "list", "(", "set", "(", "[", "'treat_ac_field'", ",", "'treat_temp'", "]", ")", ".", "intersection", "(", "this_specimen_measurements", ".", "columns", ")", ")", "print", "(", "this_specimen_measurements", "[", "cols", "]", ")", "print", "(", "'\\n Data will be plotted without interpretations\\n'", ")", "return", "ZED", "def", "make_plots", "(", "spec", ",", "cnt", ",", "meas_df", ",", "spec_container", ",", "samp_container", "=", "None", ")", ":", "# get sample data for orientation", "if", "spec_container", ":", "try", ":", "samps", "=", "spec_container", ".", "df", ".", "loc", "[", "spec", ",", "'sample'", "]", "except", "KeyError", ":", "samps", "=", "\"\"", "samp_df", "=", "[", "]", "if", "isinstance", "(", "samps", ",", "int", ")", "or", "isinstance", "(", "samps", ",", "float", ")", "or", "isinstance", "(", "samps", ",", "np", ".", "int64", ")", ":", "if", "np", ".", "isnan", "(", "samps", ")", ":", "samp", "=", "\"\"", "samp_df", "=", "[", "]", "else", ":", "samp", "=", "str", "(", "samps", ")", "samp_container", ".", "df", ".", "index", "=", "samp_container", ".", "df", ".", "index", ".", "astype", "(", "str", ")", "samp_df", "=", "samp_container", ".", "df", "[", "samp_container", ".", "df", ".", "index", "==", "samp", "]", "elif", "isinstance", "(", "samps", ",", "type", "(", "None", ")", ")", ":", "samp", "=", "\"\"", "samp_df", "=", "[", "]", "elif", "len", "(", "samps", ")", ":", "if", "isinstance", "(", "samps", ",", "str", ")", ":", "samp", "=", "samps", "else", ":", "samp", "=", "samps", ".", "iloc", "[", "0", "]", "samp_df", "=", "samp_container", ".", "df", "[", "samp_container", ".", "df", ".", "index", "==", "samp", "]", "else", ":", "samp_df", "=", "[", "]", "# we can make the figure dictionary that pmagplotlib likes:", "ZED", "=", "{", "'eqarea'", ":", "cnt", ",", "'zijd'", ":", "cnt", "+", "1", ",", "'demag'", ":", "cnt", "+", "2", "}", "# make datablock", "# get the relevant data", "spec_df", "=", "meas_df", "[", "meas_df", ".", "specimen", "==", "s", "]", "# remove ARM data", "spec_df", "=", "spec_df", "[", "-", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'LP-*[\\w]*-ARM'", ")", "]", "# split data into NRM, thermal, and af dataframes", "spec_df_nrm", "=", "spec_df", "[", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'LT-NO'", ")", "]", "# get the NRM data", "spec_df_th", "=", "spec_df", "[", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'LT-T-Z'", ")", "]", "# zero field thermal demag steps", "try", ":", "cond", "=", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'(^|[\\s\\:])LT-PTRM'", ")", 
"spec_df_th", "=", "spec_df_th", "[", "-", "cond", "]", "# get rid of some pTRM steps", "except", "ValueError", ":", "keep_inds", "=", "[", "]", "n", "=", "0", "for", "ind", ",", "row", "in", "spec_df_th", ".", "copy", "(", ")", ".", "iterrows", "(", ")", ":", "if", "'LT-PTRM'", "in", "row", "[", "'method_codes'", "]", "and", "'ALT-PTRM'", "not", "in", "row", "[", "'method_codes'", "]", ":", "keep_inds", ".", "append", "(", "n", ")", "else", ":", "pass", "n", "+=", "1", "if", "len", "(", "keep_inds", ")", "<", "n", ":", "spec_df_th", "=", "spec_df_th", ".", "iloc", "[", "keep_inds", "]", "spec_df_af", "=", "spec_df", "[", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'LT-AF-Z'", ")", "]", "this_spec_meas_df", "=", "None", "datablock", "=", "None", "if", "(", "not", "len", "(", "spec_df_th", ".", "index", ")", ">", "1", ")", "and", "(", "not", "len", "(", "spec_df_af", ".", "index", ")", ">", "1", ")", ":", "return", "if", "len", "(", "spec_df_th", ".", "index", ")", ">", "1", ":", "# this is a thermal run", "this_spec_meas_df", "=", "pd", ".", "concat", "(", "[", "spec_df_nrm", ",", "spec_df_th", "]", ")", "# make sure all decs/incs are filled in", "n_rows", "=", "len", "(", "this_spec_meas_df", ")", "this_spec_meas_df", "=", "this_spec_meas_df", ".", "dropna", "(", "how", "=", "'any'", ",", "subset", "=", "[", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", "]", ")", "if", "n_rows", ">", "len", "(", "this_spec_meas_df", ")", ":", "print", "(", "'-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'", ".", "format", "(", "s", ",", "n_rows", "-", "len", "(", "this_spec_meas_df", ")", ")", ")", "# geographic transformation", "if", "coord", "!=", "\"-1\"", "and", "len", "(", "samp_df", ")", ":", "this_spec_meas_df", "=", "transform_to_geographic", "(", "this_spec_meas_df", ",", "samp_df", ",", "samp", ",", "coord", ")", "units", "=", "'K'", "# units are kelvin", "try", ":", "this_spec_meas_df", "[", "'magn_moment'", "]", "=", "this_spec_meas_df", "[", "'magn_moment'", "]", ".", "astype", "(", "float", ")", "this_spec_meas_df", "[", "'treat_temp'", "]", "=", "this_spec_meas_df", "[", "'treat_temp'", "]", ".", "astype", "(", "float", ")", "except", ":", "print", "(", "'-W- There are malformed or missing data for specimen {}, skipping'", ".", "format", "(", "spec", ")", ")", "return", "datablock", "=", "this_spec_meas_df", "[", "[", "'treat_temp'", ",", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", ",", "'blank'", ",", "'quality'", "]", "]", ".", "values", ".", "tolist", "(", ")", "ZED", "=", "pmagplotlib", ".", "plot_zed", "(", "ZED", ",", "datablock", ",", "angle", ",", "s", ",", "units", ")", "if", "len", "(", "spec_df_af", ".", "index", ")", ">", "1", ":", "# this is an af run", "this_spec_meas_df", "=", "pd", ".", "concat", "(", "[", "spec_df_nrm", ",", "spec_df_af", "]", ")", "# make sure all decs/incs are filled in", "n_rows", "=", "len", "(", "this_spec_meas_df", ")", "this_spec_meas_df", "=", "this_spec_meas_df", ".", "dropna", "(", "how", "=", "'any'", ",", "subset", "=", "[", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", "]", ")", "if", "n_rows", ">", "len", "(", "this_spec_meas_df", ")", ":", "print", "(", "'-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'", ".", "format", "(", "s", ",", "n_rows", "-", "len", "(", "this_spec_meas_df", ")", ")", ")", "# geographic transformation", "if", "coord", "!=", "\"-1\"", "and", "len", "(", "samp_df", ")", ":", 
"this_spec_meas_df", "=", "transform_to_geographic", "(", "this_spec_meas_df", ",", "samp_df", ",", "samp", ",", "coord", ")", "units", "=", "'T'", "# these are AF data", "try", ":", "this_spec_meas_df", "[", "'magn_moment'", "]", "=", "this_spec_meas_df", "[", "'magn_moment'", "]", ".", "astype", "(", "float", ")", "this_spec_meas_df", "[", "'treat_ac_field'", "]", "=", "this_spec_meas_df", "[", "'treat_ac_field'", "]", ".", "astype", "(", "float", ")", "except", ":", "print", "(", "'-W- There are malformed or missing data for specimen {}, skipping'", ".", "format", "(", "spec", ")", ")", "return", "datablock", "=", "this_spec_meas_df", "[", "[", "'treat_ac_field'", ",", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", ",", "'blank'", ",", "'quality'", "]", "]", ".", "values", ".", "tolist", "(", ")", "ZED", "=", "pmagplotlib", ".", "plot_zed", "(", "ZED", ",", "datablock", ",", "angle", ",", "s", ",", "units", ")", "return", "plot_interpretations", "(", "ZED", ",", "spec_container", ",", "s", ",", "this_spec_meas_df", ",", "datablock", ")", "if", "interactive", ":", "save_plots", "=", "False", "# read in MagIC formatted data if contribution object not provided", "if", "not", "isinstance", "(", "contribution", ",", "cb", ".", "Contribution", ")", ":", "input_dir_path", "=", "os", ".", "path", ".", "realpath", "(", "input_dir_path", ")", "file_path", "=", "pmag", ".", "resolve_file_name", "(", "meas_file", ",", "input_dir_path", ")", "# read in magic formatted data", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "print", "(", "'No such file:'", ",", "file_path", ")", "return", "False", ",", "[", "]", "custom_filenames", "=", "{", "'measurements'", ":", "file_path", ",", "'specimens'", ":", "spec_file", ",", "'samples'", ":", "samp_file", "}", "contribution", "=", "cb", ".", "Contribution", "(", "input_dir_path", ",", "custom_filenames", "=", "custom_filenames", ",", "read_tables", "=", "[", "'measurements'", ",", "'specimens'", ",", "'contribution'", ",", "'samples'", "]", ")", "if", "pmagplotlib", ".", "isServer", ":", "try", ":", "contribution", ".", "propagate_location_to_samples", "(", ")", "contribution", ".", "propagate_location_to_specimens", "(", ")", "contribution", ".", "propagate_location_to_measurements", "(", ")", "except", "KeyError", "as", "ex", ":", "pass", "meas_container", "=", "contribution", ".", "tables", "[", "'measurements'", "]", "meas_df", "=", "contribution", ".", "tables", "[", "'measurements'", "]", ".", "df", "#", "#meas_df=pd.read_csv(file_path, sep='\\t', header=1)", "spec_container", "=", "contribution", ".", "tables", ".", "get", "(", "'specimens'", ",", "None", ")", "samp_container", "=", "contribution", ".", "tables", ".", "get", "(", "'samples'", ",", "None", ")", "#if not spec_file:", "# spec_file = os.path.join(os.path.split(file_path)[0], \"specimens.txt\")", "#if os.path.exists(spec_file):", "# spec_container = cb.MagicDataFrame(spec_file, dtype=\"specimens\")", "#else:", "# spec_container = None", "meas_df", "[", "'blank'", "]", "=", "\"\"", "# this is a dummy variable expected by plotZED", "if", "'treat_ac_field'", "in", "meas_df", ".", "columns", ":", "# create 'treatment' column.", "# uses treat_temp if treat_ac_field is missing OR zero.", "# (have to take this into account for plotting later)", "if", "'treat_temp'", "in", "meas_df", ".", "columns", ":", "meas_df", "[", "'treatment'", "]", "=", "meas_df", "[", "'treat_ac_field'", "]", ".", "where", "(", "cond", "=", "meas_df", "[", "'treat_ac_field'", "]", ".", 
"astype", "(", "bool", ")", ",", "other", "=", "meas_df", "[", "'treat_temp'", "]", ")", "else", ":", "meas_df", "[", "'treatment'", "]", "=", "meas_df", "[", "'treat_ac_field'", "]", "else", ":", "meas_df", "[", "'treatment'", "]", "=", "meas_df", "[", "'treat_temp'", "]", "if", "crd", "==", "\"s\"", ":", "coord", "=", "\"-1\"", "elif", "crd", "==", "\"t\"", ":", "coord", "=", "\"100\"", "else", ":", "coord", "=", "\"0\"", "specimens", "=", "meas_df", ".", "specimen", ".", "unique", "(", ")", "# list of specimen names", "if", "len", "(", "specimens", ")", "==", "0", ":", "print", "(", "'there are no data for plotting'", ")", "return", "False", ",", "[", "]", "# check measurement table for req'd fields", "missing", "=", "[", "]", "reqd_cols_present", "=", "meas_df", ".", "columns", ".", "intersection", "(", "[", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", "]", ")", "for", "col", "in", "[", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", "]", ":", "if", "col", "not", "in", "reqd_cols_present", ":", "missing", ".", "append", "(", "col", ")", "if", "missing", ":", "print", "(", "'-W- Missing required column(s) {}, cannot run zeq_magic'", ".", "format", "(", "', '", ".", "join", "(", "missing", ")", ")", ")", "return", "False", ",", "[", "]", "cnt", "=", "fignum", "if", "n_plots", "!=", "\"all\"", ":", "if", "len", "(", "specimens", ")", ">", "n_plots", ":", "specimens", "=", "specimens", "[", ":", "n_plots", "]", "saved", "=", "[", "]", "if", "specimen", ":", "specimens", "=", "[", "specimen", "]", "for", "s", "in", "specimens", ":", "ZED", "=", "make_plots", "(", "s", ",", "cnt", ",", "meas_df", ",", "spec_container", ",", "samp_container", ")", "if", "not", "ZED", ":", "if", "pmagplotlib", ".", "verbose", ":", "print", "(", "'No plots could be created for specimen:'", ",", "s", ")", "continue", "titles", "=", "{", "key", ":", "s", "+", "\"_\"", "+", "key", "+", "\".\"", "+", "fmt", "for", "key", "in", "ZED", "}", "if", "pmagplotlib", ".", "isServer", ":", "titles", "=", "{", "}", "titles", "[", "'eqarea'", "]", "=", "'Equal Area Plot'", "titles", "[", "'zijd'", "]", "=", "'Zijderveld Plot'", "titles", "[", "'demag'", "]", "=", "'Demagnetization Plot'", "con_id", "=", "\"\"", "if", "'contribution'", "in", "contribution", ".", "tables", ":", "if", "'id'", "in", "contribution", ".", "tables", "[", "'contribution'", "]", ".", "df", ".", "columns", ":", "con_id", "=", "contribution", ".", "tables", "[", "'contribution'", "]", ".", "df", "[", "'id'", "]", ".", "values", "[", "0", "]", "pmagplotlib", ".", "add_borders", "(", "ZED", ",", "titles", ",", "con_id", "=", "con_id", ")", "for", "title", "in", "titles", ":", "# try to get the full hierarchy for plot names", "df_slice", "=", "meas_container", ".", "df", "[", "meas_container", ".", "df", "[", "'specimen'", "]", "==", "s", "]", "location", "=", "str", "(", "meas_container", ".", "get_name", "(", "'location'", ",", "df_slice", ")", ")", "site", "=", "str", "(", "meas_container", ".", "get_name", "(", "'site'", ",", "df_slice", ")", ")", "sample", "=", "str", "(", "meas_container", ".", "get_name", "(", "'sample'", ",", "df_slice", ")", ")", "# add coord here!", "filename", "=", "'LO:_'", "+", "location", "+", "'_SI:_'", "+", "site", "+", "'_SA:_'", "+", "sample", "+", "'_SP:_'", "+", "str", "(", "s", ")", "+", "'_CO:_'", "+", "'_TY:_'", "+", "title", "+", "'_.png'", "titles", "[", "title", "]", "=", "filename", "if", "save_plots", ":", "saved", ".", "extend", "(", "pmagplotlib", ".", "save_plots", "(", "ZED", ",", "titles", ")", 
")", "elif", "interactive", ":", "pmagplotlib", ".", "draw_figs", "(", "ZED", ")", "ans", "=", "pmagplotlib", ".", "save_or_quit", "(", ")", "if", "ans", "==", "'a'", ":", "saved", ".", "extend", "(", "pmagplotlib", ".", "save_plots", "(", "ZED", ",", "titles", ")", ")", "else", ":", "continue", "else", ":", "cnt", "+=", "3", "return", "True", ",", "saved" ]
zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.

Parameters
----------
meas_file : str
    input measurement file
spec_file : str
    input specimen interpretation file
samp_file : str
    input sample orientations file
crd : str
    coordinate system [s,g,t] for specimen, geographic, tilt corrected
    g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
    input directory of meas_file, default "."
angle : float
    angle of X direction with respect to specimen X
n_plots : int, default 5
    maximum number of plots to make
    if you want to make all possible plots, specify "all"
save_plots : bool, default True
    if True, create and save all requested plots
fmt : str, default "svg"
    format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
    interactively plot and display for each specimen
    (this is best used on the command line only)
specimen : str, default ""
    specimen name to plot
samp_file : str, default 'samples.txt'
    name of samples file
contribution : cb.Contribution, default None
    if provided, use Contribution object instead of reading in
    data from files
fignum : matplotlib figure number
[ "zeq_magic", "makes", "zijderveld", "and", "equal", "area", "plots", "for", "magic", "formatted", "measurements", "files", ".", "Parameters", "----------", "meas_file", ":", "str", "input", "measurement", "file", "spec_file", ":", "str", "input", "specimen", "interpretation", "file", "samp_file", ":", "str", "input", "sample", "orientations", "file", "crd", ":", "str", "coordinate", "system", "[", "s", "g", "t", "]", "for", "specimen", "geographic", "tilt", "corrected", "g", "t", "options", "require", "a", "sample", "file", "with", "specimen", "and", "bedding", "orientation", "input_dir_path", ":", "str", "input", "directory", "of", "meas_file", "default", ".", "angle", ":", "float", "angle", "of", "X", "direction", "with", "respect", "to", "specimen", "X", "n_plots", ":", "int", "default", "5", "maximum", "number", "of", "plots", "to", "make", "if", "you", "want", "to", "make", "all", "possible", "plots", "specify", "all", "save_plots", ":", "bool", "default", "True", "if", "True", "create", "and", "save", "all", "requested", "plots", "fmt", ":", "str", "default", "svg", "format", "for", "figures", "[", "svg", "jpg", "pdf", "png", "]", "interactive", ":", "bool", "default", "False", "interactively", "plot", "and", "display", "for", "each", "specimen", "(", "this", "is", "best", "used", "on", "the", "command", "line", "only", ")", "specimen", ":", "str", "default", "specimen", "name", "to", "plot", "samp_file", ":", "str", "default", "samples", ".", "txt", "name", "of", "samples", "file", "contribution", ":", "cb", ".", "Contribution", "default", "None", "if", "provided", "use", "Contribution", "object", "instead", "of", "reading", "in", "data", "from", "files", "fignum", ":", "matplotlib", "figure", "number" ]
python
train
49.015385
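
A hypothetical call of the function above; the directory and file names are invented. This would plot up to three specimens from a MagIC project in geographic coordinates and save the figures as PNGs.

from pmagpy import ipmag

ok, saved = ipmag.zeq_magic(meas_file='measurements.txt',
                            input_dir_path='my_magic_project',
                            crd='g', n_plots=3, fmt='png')
if ok:
    print('saved figures:', saved)
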
chrisspen/dtree
dtree.py
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L270-L277
def update(self, dist):
    """
    Adds the given distribution's counts to the current distribution.
    """
    assert isinstance(dist, DDist)
    for k, c in iteritems(dist.counts):
        self.counts[k] += c
    self.total += dist.total
[ "def", "update", "(", "self", ",", "dist", ")", ":", "assert", "isinstance", "(", "dist", ",", "DDist", ")", "for", "k", ",", "c", "in", "iteritems", "(", "dist", ".", "counts", ")", ":", "self", ".", "counts", "[", "k", "]", "+=", "c", "self", ".", "total", "+=", "dist", ".", "total" ]
Adds the given distribution's counts to the current distribution.
[ "Adds", "the", "given", "distribution", "s", "counts", "to", "the", "current", "distribution", "." ]
python
train
32.75
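
DDist's constructor is not shown in this record, so the snippet below is a behavioral analogy only: collections.Counter plays the role of self.counts to show the per-key merge that update() performs (DDist additionally accumulates dist.total into self.total).

from collections import Counter

a = Counter({'yes': 3, 'no': 1})     # stands in for self.counts
b = Counter({'yes': 2, 'maybe': 4})  # stands in for dist.counts
a.update(b)                          # the same per-key addition DDist.update does
print(a)                             # Counter({'yes': 5, 'maybe': 4, 'no': 1})
print(sum(a.values()))               # 10 -- the running total DDist tracks
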
peeringdb/peeringdb-py
peeringdb/backend.py
https://github.com/peeringdb/peeringdb-py/blob/cf2060a1d5ef879a01cf849e54b7756909ab2661/peeringdb/backend.py#L8-L27
def reftag_to_cls(fn):
    """
    decorator that checks function arguments for `concrete` and `resource`
    and will properly set them to class references if a string (reftag)
    is passed as the value
    """
    names, _, _, values = inspect.getargspec(fn)

    @wraps(fn)
    def wrapped(*args, **kwargs):
        i = 0
        backend = args[0]
        # NOTE (as transcribed): `args` is a tuple inside wrapped(), so the
        # item assignments below presume a mutable argument sequence, and
        # `value = args[i]` begins at the bound instance while the `name`
        # iteration begins one position after it.
        for name in names[1:]:
            value = args[i]
            if name == "concrete" and isinstance(value, six.string_types):
                args[i] = backend.REFTAG_CONCRETE[value]
            elif name == "resource" and isinstance(value, six.string_types):
                args[i] = backend.REFTAG_RESOURCE[value]
            i += 1
        return fn(*args, **kwargs)

    return wrapped
[ "def", "reftag_to_cls", "(", "fn", ")", ":", "names", ",", "_", ",", "_", ",", "values", "=", "inspect", ".", "getargspec", "(", "fn", ")", "@", "wraps", "(", "fn", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "i", "=", "0", "backend", "=", "args", "[", "0", "]", "for", "name", "in", "names", "[", "1", ":", "]", ":", "value", "=", "args", "[", "i", "]", "if", "name", "==", "\"concrete\"", "and", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "args", "[", "i", "]", "=", "backend", ".", "REFTAG_CONCRETE", "[", "value", "]", "elif", "name", "==", "\"resource\"", "and", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "args", "[", "i", "]", "=", "backend", ".", "REFTAG_RESOURCE", "[", "value", "]", "i", "+=", "1", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped" ]
decorator that checks function arguments for `concrete` and `resource`
and will properly set them to class references if a string (reftag)
is passed as the value
[ "decorator", "that", "checks", "function", "arguments", "for", "concrete", "and", "resource", "and", "will", "properly", "set", "them", "to", "class", "references", "if", "a", "string", "(", "reftag", ")", "is", "passed", "as", "the", "value" ]
python
train
36.45
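
An independent sketch of the reftag-resolution idea; the classes and reftags are invented, and the list-rebuilding approach sidesteps the tuple-assignment caveat noted in the record's code above. It is a pattern illustration, not the library's decorator.

from functools import wraps

class Network(object):
    """Toy concrete class a reftag can resolve to."""

def resolve_reftags(fn):
    @wraps(fn)
    def wrapped(self, *args, **kwargs):
        # swap string reftags for the classes the backend maps them to
        args = [self.REFTAG_RESOURCE[a] if isinstance(a, str) else a
                for a in args]
        return fn(self, *args, **kwargs)
    return wrapped

class FakeBackend(object):
    REFTAG_RESOURCE = {'net': Network}

    @resolve_reftags
    def get_resource(self, resource):
        return resource

print(FakeBackend().get_resource('net'))     # resolved to the Network class
print(FakeBackend().get_resource(Network))   # classes pass through unchanged
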
albahnsen/CostSensitiveClassification
costcla/probcal/probcal.py
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/probcal/probcal.py#L137-L161
def predict_proba(self, p):
    """ Calculate the calibrated probabilities

    Parameters
    ----------
    y_prob : array-like of shape = [n_samples, 2]
        Predicted probabilities to be calibrated using calibration map

    Returns
    -------
    y_prob_cal : array-like of shape = [n_samples, 1]
        Predicted calibrated probabilities
    """
    # TODO: Check input
    if p.size != p.shape[0]:
        p = p[:, 1]

    calibrated_proba = np.zeros(p.shape[0])
    for i in range(self.calibration_map.shape[0]):
        calibrated_proba[np.logical_and(self.calibration_map[i, 1] <= p,
                                        self.calibration_map[i, 0] > p)] = \
            self.calibration_map[i, 2]

    # TODO: return 2D and refactor
    return calibrated_proba
[ "def", "predict_proba", "(", "self", ",", "p", ")", ":", "# TODO: Check input", "if", "p", ".", "size", "!=", "p", ".", "shape", "[", "0", "]", ":", "p", "=", "p", "[", ":", ",", "1", "]", "calibrated_proba", "=", "np", ".", "zeros", "(", "p", ".", "shape", "[", "0", "]", ")", "for", "i", "in", "range", "(", "self", ".", "calibration_map", ".", "shape", "[", "0", "]", ")", ":", "calibrated_proba", "[", "np", ".", "logical_and", "(", "self", ".", "calibration_map", "[", "i", ",", "1", "]", "<=", "p", ",", "self", ".", "calibration_map", "[", "i", ",", "0", "]", ">", "p", ")", "]", "=", "self", ".", "calibration_map", "[", "i", ",", "2", "]", "# TODO: return 2D and refactor", "return", "calibrated_proba" ]
Calculate the calibrated probabilities

Parameters
----------
y_prob : array-like of shape = [n_samples, 2]
    Predicted probabilities to be calibrated using calibration map

Returns
-------
y_prob_cal : array-like of shape = [n_samples, 1]
    Predicted calibrated probabilities
[ "Calculate", "the", "calibrated", "probabilities" ]
python
train
31.64
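
A standalone illustration of the lookup predict_proba performs; the map values are invented. Per the condition in the loop, each calibration_map row reads as (upper_bound, lower_bound, calibrated_prob), and a raw score p falling in [lower, upper) is replaced by the row's calibrated probability.

import numpy as np

calibration_map = np.array([
    [0.5, 0.0, 0.10],    # raw p in [0.0, 0.5)  -> 0.10
    [1.01, 0.5, 0.80],   # raw p in [0.5, 1.01) -> 0.80
])
p = np.array([0.2, 0.7, 0.55])
calibrated = np.zeros(p.shape[0])
for i in range(calibration_map.shape[0]):
    in_bin = np.logical_and(calibration_map[i, 1] <= p, calibration_map[i, 0] > p)
    calibrated[in_bin] = calibration_map[i, 2]
print(calibrated)   # [0.1 0.8 0.8]
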
spyder-ide/spyder
spyder/plugins/plots/widgets/figurebrowser.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/plots/widgets/figurebrowser.py#L517-L539
def setup_scrollarea(self):
    """Setup the scrollarea that will contain the FigureThumbnails."""
    self.view = QWidget()

    self.scene = QGridLayout(self.view)
    self.scene.setColumnStretch(0, 100)
    self.scene.setColumnStretch(2, 100)

    self.scrollarea = QScrollArea()
    self.scrollarea.setWidget(self.view)
    self.scrollarea.setWidgetResizable(True)
    self.scrollarea.setFrameStyle(0)
    self.scrollarea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.scrollarea.setSizePolicy(QSizePolicy(QSizePolicy.Ignored,
                                              QSizePolicy.Preferred))

    # Set the vertical scrollbar explicitly:
    # This is required to avoid a "RuntimeError: no access to protected
    # functions or signals for objects not created from Python" in Linux.
    self.scrollarea.setVerticalScrollBar(QScrollBar())

    return self.scrollarea
[ "def", "setup_scrollarea", "(", "self", ")", ":", "self", ".", "view", "=", "QWidget", "(", ")", "self", ".", "scene", "=", "QGridLayout", "(", "self", ".", "view", ")", "self", ".", "scene", ".", "setColumnStretch", "(", "0", ",", "100", ")", "self", ".", "scene", ".", "setColumnStretch", "(", "2", ",", "100", ")", "self", ".", "scrollarea", "=", "QScrollArea", "(", ")", "self", ".", "scrollarea", ".", "setWidget", "(", "self", ".", "view", ")", "self", ".", "scrollarea", ".", "setWidgetResizable", "(", "True", ")", "self", ".", "scrollarea", ".", "setFrameStyle", "(", "0", ")", "self", ".", "scrollarea", ".", "setVerticalScrollBarPolicy", "(", "Qt", ".", "ScrollBarAlwaysOff", ")", "self", ".", "scrollarea", ".", "setHorizontalScrollBarPolicy", "(", "Qt", ".", "ScrollBarAlwaysOff", ")", "self", ".", "scrollarea", ".", "setSizePolicy", "(", "QSizePolicy", "(", "QSizePolicy", ".", "Ignored", ",", "QSizePolicy", ".", "Preferred", ")", ")", "# Set the vertical scrollbar explicitely :", "# This is required to avoid a \"RuntimeError: no access to protected", "# functions or signals for objects not created from Python\" in Linux.", "self", ".", "scrollarea", ".", "setVerticalScrollBar", "(", "QScrollBar", "(", ")", ")", "return", "self", ".", "scrollarea" ]
Setup the scrollarea that will contain the FigureThumbnails.
[ "Setup", "the", "scrollarea", "that", "will", "contain", "the", "FigureThumbnails", "." ]
python
train
43.956522
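
A standalone sketch of the workaround described in the code's final comment; qtpy (which Spyder uses) is assumed as the import shim, and a Qt binding must be installed for this to run.

from qtpy.QtCore import Qt
from qtpy.QtWidgets import QApplication, QScrollArea, QScrollBar, QWidget

app = QApplication([])
area = QScrollArea()
area.setWidget(QWidget())
area.setWidgetResizable(True)
area.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
area.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
# Creating the scrollbar from Python keeps the wrapper Python-owned,
# avoiding the protected-access RuntimeError mentioned above.
area.setVerticalScrollBar(QScrollBar())
area.show()
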
datadesk/django-bakery
bakery/management/commands/build.py
https://github.com/datadesk/django-bakery/blob/e2feb13a66552a388fbcfaaacdd504bba08d3c69/bakery/management/commands/build.py#L90-L115
def handle(self, *args, **options):
    """
    Making it happen.
    """
    logger.info("Build started")

    # Set options
    self.set_options(*args, **options)

    # Get the build directory ready
    if not options.get("keep_build_dir"):
        self.init_build_dir()

    # Build up static files
    if not options.get("skip_static"):
        self.build_static()

    # Build the media directory
    if not options.get("skip_media"):
        self.build_media()

    # Build views
    self.build_views()

    # Close out
    logger.info("Build finished")
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "logger", ".", "info", "(", "\"Build started\"", ")", "# Set options", "self", ".", "set_options", "(", "*", "args", ",", "*", "*", "options", ")", "# Get the build directory ready", "if", "not", "options", ".", "get", "(", "\"keep_build_dir\"", ")", ":", "self", ".", "init_build_dir", "(", ")", "# Build up static files", "if", "not", "options", ".", "get", "(", "\"skip_static\"", ")", ":", "self", ".", "build_static", "(", ")", "# Build the media directory", "if", "not", "options", ".", "get", "(", "\"skip_media\"", ")", ":", "self", ".", "build_media", "(", ")", "# Build views", "self", ".", "build_views", "(", ")", "# Close out", "logger", ".", "info", "(", "\"Build finished\"", ")" ]
Making it happen.
[ "Making", "it", "happen", "." ]
python
train
23.5
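
One way to drive this management command from Python rather than the shell; the flag names are inferred from the options keys read above, and a configured Django settings module is assumed.

from django.core.management import call_command

# Equivalent to: python manage.py build --keep-build-dir --skip-static
call_command('build', keep_build_dir=True, skip_static=True)
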
eqcorrscan/EQcorrscan
eqcorrscan/utils/archive_read.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/archive_read.py#L31-L140
def read_data(archive, arc_type, day, stachans, length=86400):
    """
    Function to read the appropriate data from an archive for a day.

    :type archive: str
    :param archive:
        The archive source - if arc_type is seishub, this should be a url,
        if the arc_type is FDSN then this can be either a url or a known
        obspy client.  If arc_type is day_vols, then this is the path to
        the top directory.
    :type arc_type: str
    :param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
    :type day: datetime.date
    :param day: Date to retrieve data for
    :type stachans: list
    :param stachans: List of tuples of Stations and channels to try and get,
        will not fail if stations are not available, but will warn.
    :type length: float
    :param length: Data length to extract in seconds, defaults to 1 day.

    :returns: Stream of data
    :rtype: obspy.core.stream.Stream

    .. note:: A note on arc_types, if arc_type is day_vols, then this will \
        look for directories labelled in the IRIS DMC conventions of \
        Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
        Data within these files directories should be stored as day-long, \
        single-channel files.  This is not implemented in the fastest way \
        possible to allow for a more general situation.  If you require more \
        speed you will need to re-write this.

    .. rubric:: Example

    >>> from obspy import UTCDateTime
    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('JCNB', 'SP1')]
    >>> st = read_data('NCEDC', 'FDSN', t1, stachans)
    >>> print(st)
    1 Trace(s) in Stream:
    BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples

    .. rubric:: Example, missing data

    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
    >>> st = read_data('NCEDC', 'FDSN', t1, stachans)
    >>> print(st)
    1 Trace(s) in Stream:
    BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples

    .. rubric:: Example, local day-volumes

    >>> # Get the path to the test data
    >>> import eqcorrscan
    >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
    >>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
    ...                t1, stachans)
    >>> print(st)
    2 Trace(s) in Stream:
    AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
    AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
    """
    st = []
    available_stations = _check_available_data(archive, arc_type, day)
    for station in stachans:
        if len(station[1]) == 2:
            # Cope with two char channel naming in seisan
            station_map = (station[0], station[1][0] + '*' + station[1][1])
            available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
                                      for sta in available_stations]
        else:
            station_map = station
            available_stations_map = available_stations
        if station_map not in available_stations_map:
            msg = ' '.join([station[0], station_map[1], 'is not available for',
                            day.strftime('%Y/%m/%d')])
            warnings.warn(msg)
            continue
        if arc_type.lower() == 'seishub':
            client = SeishubClient(archive)
            st += client.get_waveforms(
                network='*', station=station_map[0], location='*',
                channel=station_map[1], starttime=UTCDateTime(day),
                endtime=UTCDateTime(day) + length)
        elif arc_type.upper() == "FDSN":
            client = FDSNClient(archive)
            try:
                st += client.get_waveforms(
                    network='*', station=station_map[0], location='*',
                    channel=station_map[1], starttime=UTCDateTime(day),
                    endtime=UTCDateTime(day) + length)
            except FDSNException:
                warnings.warn('No data on server despite station being ' +
                              'available...')
                continue
        elif arc_type.lower() == 'day_vols':
            wavfiles = _get_station_file(os.path.join(
                archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
                station_map[0], station_map[1])
            for wavfile in wavfiles:
                st += read(wavfile, starttime=day, endtime=day + length)
    st = Stream(st)
    return st
[ "def", "read_data", "(", "archive", ",", "arc_type", ",", "day", ",", "stachans", ",", "length", "=", "86400", ")", ":", "st", "=", "[", "]", "available_stations", "=", "_check_available_data", "(", "archive", ",", "arc_type", ",", "day", ")", "for", "station", "in", "stachans", ":", "if", "len", "(", "station", "[", "1", "]", ")", "==", "2", ":", "# Cope with two char channel naming in seisan", "station_map", "=", "(", "station", "[", "0", "]", ",", "station", "[", "1", "]", "[", "0", "]", "+", "'*'", "+", "station", "[", "1", "]", "[", "1", "]", ")", "available_stations_map", "=", "[", "(", "sta", "[", "0", "]", ",", "sta", "[", "1", "]", "[", "0", "]", "+", "'*'", "+", "sta", "[", "1", "]", "[", "-", "1", "]", ")", "for", "sta", "in", "available_stations", "]", "else", ":", "station_map", "=", "station", "available_stations_map", "=", "available_stations", "if", "station_map", "not", "in", "available_stations_map", ":", "msg", "=", "' '", ".", "join", "(", "[", "station", "[", "0", "]", ",", "station_map", "[", "1", "]", ",", "'is not available for'", ",", "day", ".", "strftime", "(", "'%Y/%m/%d'", ")", "]", ")", "warnings", ".", "warn", "(", "msg", ")", "continue", "if", "arc_type", ".", "lower", "(", ")", "==", "'seishub'", ":", "client", "=", "SeishubClient", "(", "archive", ")", "st", "+=", "client", ".", "get_waveforms", "(", "network", "=", "'*'", ",", "station", "=", "station_map", "[", "0", "]", ",", "location", "=", "'*'", ",", "channel", "=", "station_map", "[", "1", "]", ",", "starttime", "=", "UTCDateTime", "(", "day", ")", ",", "endtime", "=", "UTCDateTime", "(", "day", ")", "+", "length", ")", "elif", "arc_type", ".", "upper", "(", ")", "==", "\"FDSN\"", ":", "client", "=", "FDSNClient", "(", "archive", ")", "try", ":", "st", "+=", "client", ".", "get_waveforms", "(", "network", "=", "'*'", ",", "station", "=", "station_map", "[", "0", "]", ",", "location", "=", "'*'", ",", "channel", "=", "station_map", "[", "1", "]", ",", "starttime", "=", "UTCDateTime", "(", "day", ")", ",", "endtime", "=", "UTCDateTime", "(", "day", ")", "+", "length", ")", "except", "FDSNException", ":", "warnings", ".", "warn", "(", "'No data on server despite station being '", "+", "'available...'", ")", "continue", "elif", "arc_type", ".", "lower", "(", ")", "==", "'day_vols'", ":", "wavfiles", "=", "_get_station_file", "(", "os", ".", "path", ".", "join", "(", "archive", ",", "day", ".", "strftime", "(", "'Y%Y'", "+", "os", ".", "sep", "+", "'R%j.01'", ")", ")", ",", "station_map", "[", "0", "]", ",", "station_map", "[", "1", "]", ")", "for", "wavfile", "in", "wavfiles", ":", "st", "+=", "read", "(", "wavfile", ",", "starttime", "=", "day", ",", "endtime", "=", "day", "+", "length", ")", "st", "=", "Stream", "(", "st", ")", "return", "st" ]
Function to read the appropriate data from an archive for a day.

:type archive: str
:param archive:
    The archive source - if arc_type is seishub, this should be a url,
    if the arc_type is FDSN then this can be either a url or a known
    obspy client.  If arc_type is day_vols, then this is the path to
    the top directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
    will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.

:returns: Stream of data
:rtype: obspy.core.stream.Stream

.. note:: A note on arc_types, if arc_type is day_vols, then this will \
    look for directories labelled in the IRIS DMC conventions of \
    Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
    Data within these files directories should be stored as day-long, \
    single-channel files.  This is not implemented in the fastest way \
    possible to allow for a more general situation.  If you require more \
    speed you will need to re-write this.

.. rubric:: Example

>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples

.. rubric:: Example, missing data

>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples

.. rubric:: Example, local day-volumes

>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
...                t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
[ "Function", "to", "read", "the", "appropriate", "data", "from", "an", "archive", "for", "a", "day", "." ]
python
train
42.045455
billyoverton/tweetqueue
tweetqueue/TweetList.py
https://github.com/billyoverton/tweetqueue/blob/e54972a0137ea2a21b2357b81408d9d4c92fdd61/tweetqueue/TweetList.py#L57-L78
def append(self, tweet): """Add a tweet to the end of the list.""" c = self.connection.cursor() last_tweet = c.execute("SELECT tweet from tweetlist where label='last_tweet'").next()[0] c.execute("INSERT INTO tweets(message, previous_tweet, next_tweet) VALUES (?,?,NULL)", (tweet, last_tweet)) tweet_id = c.lastrowid # Set the current tweet as the last tweet c.execute("UPDATE tweetlist SET tweet=? WHERE label='last_tweet'", (tweet_id,)) # If there was no last_tweet, there was no first_tweet # so make this the first tweet if last_tweet is None: c.execute("UPDATE tweetlist SET tweet=? WHERE label='first_tweet'", (tweet_id,)) else: # Update the last tweets reference to this one c.execute("UPDATE tweets SET next_tweet = ? WHERE id= ? ", (tweet_id, last_tweet)) self.connection.commit() c.close()
[ "def", "append", "(", "self", ",", "tweet", ")", ":", "c", "=", "self", ".", "connection", ".", "cursor", "(", ")", "last_tweet", "=", "c", ".", "execute", "(", "\"SELECT tweet from tweetlist where label='last_tweet'\"", ")", ".", "next", "(", ")", "[", "0", "]", "c", ".", "execute", "(", "\"INSERT INTO tweets(message, previous_tweet, next_tweet) VALUES (?,?,NULL)\"", ",", "(", "tweet", ",", "last_tweet", ")", ")", "tweet_id", "=", "c", ".", "lastrowid", "# Set the current tweet as the last tweet", "c", ".", "execute", "(", "\"UPDATE tweetlist SET tweet=? WHERE label='last_tweet'\"", ",", "(", "tweet_id", ",", ")", ")", "# If there was no last_tweet, there was no first_tweet", "# so make this the first tweet", "if", "last_tweet", "is", "None", ":", "c", ".", "execute", "(", "\"UPDATE tweetlist SET tweet=? WHERE label='first_tweet'\"", ",", "(", "tweet_id", ",", ")", ")", "else", ":", "# Update the last tweets reference to this one", "c", ".", "execute", "(", "\"UPDATE tweets SET next_tweet = ? WHERE id= ? \"", ",", "(", "tweet_id", ",", "last_tweet", ")", ")", "self", ".", "connection", ".", "commit", "(", ")", "c", ".", "close", "(", ")" ]
Add a tweet to the end of the list.
[ "Add", "a", "tweet", "to", "the", "end", "of", "the", "list", "." ]
python
train
41.954545
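The append above implements a doubly-linked list inside SQLite: each row in tweets points at its neighbours, while a two-row tweetlist table tracks the head and tail. Neither table's DDL is part of this record, so the sketch below is an inferred, hypothetical schema that merely makes the statements above executable; the real project may differ. Note also that c.execute(...).next() is Python 2 idiom, where Python 3 would use next(c.execute(...)).

import sqlite3

# Hypothetical schema inferred from the SQL in append(); names match the
# statements above, constraints are guesses.
connection = sqlite3.connect(":memory:")
connection.executescript("""
    CREATE TABLE tweets (
        id             INTEGER PRIMARY KEY AUTOINCREMENT,
        message        TEXT NOT NULL,
        previous_tweet INTEGER,  -- id of the preceding tweet, NULL at head
        next_tweet     INTEGER   -- id of the following tweet, NULL at tail
    );
    CREATE TABLE tweetlist (
        label TEXT PRIMARY KEY,  -- 'first_tweet' or 'last_tweet'
        tweet INTEGER            -- id into tweets, NULL while list is empty
    );
    INSERT INTO tweetlist (label, tweet) VALUES ('first_tweet', NULL);
    INSERT INTO tweetlist (label, tweet) VALUES ('last_tweet', NULL);
""")
connection.commit()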
CityOfZion/neo-python-core
neocore/Cryptography/ECCurve.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/Cryptography/ECCurve.py#L711-L740
def verify(self, message, pubkey, rnum, snum): """ Verify the signature for message m, pubkey Y, signature (r,s) r = xcoord(R) verify that : G*m+Y*r=R*s this is true because: { Y=G*x, and R=G*k, s=(m+x*r)/k } G*m+G*x*r = G*k*(m+x*r)/k -> G*(m+x*r) = G*(m+x*r) several ways to do the verification: r == xcoord[ G*(m/s) + Y*(r/s) ] <<< the standard way R * s == G*m + Y*r r == xcoord[ (G*m + Y*r)/s) ] """ m = self.GFn.value(message) r = self.GFn.value(rnum) s = self.GFn.value(snum) R = self.G * (m / s) + pubkey * (r / s) # alternative methods of verifying # RORG= self.ec.decompress(r, 0) # RR = self.G * m + pubkey * r # print "#1: %s .. %s" % (RR, RORG*s) # print "#2: %s .. %s" % (RR*(1/s), r) # print "#3: %s .. %s" % (R, r) return R.x == r
[ "def", "verify", "(", "self", ",", "message", ",", "pubkey", ",", "rnum", ",", "snum", ")", ":", "m", "=", "self", ".", "GFn", ".", "value", "(", "message", ")", "r", "=", "self", ".", "GFn", ".", "value", "(", "rnum", ")", "s", "=", "self", ".", "GFn", ".", "value", "(", "snum", ")", "R", "=", "self", ".", "G", "*", "(", "m", "/", "s", ")", "+", "pubkey", "*", "(", "r", "/", "s", ")", "# alternative methods of verifying", "# RORG= self.ec.decompress(r, 0)", "# RR = self.G * m + pubkey * r", "# print \"#1: %s .. %s\" % (RR, RORG*s)", "# print \"#2: %s .. %s\" % (RR*(1/s), r)", "# print \"#3: %s .. %s\" % (R, r)", "return", "R", ".", "x", "==", "r" ]
Verify the signature for message m, pubkey Y, signature (r,s) r = xcoord(R) verify that : G*m+Y*r=R*s this is true because: { Y=G*x, and R=G*k, s=(m+x*r)/k } G*m+G*x*r = G*k*(m+x*r)/k -> G*(m+x*r) = G*(m+x*r) several ways to do the verification: r == xcoord[ G*(m/s) + Y*(r/s) ] <<< the standard way R * s == G*m + Y*r r == xcoord[ (G*m + Y*r)/s) ]
[ "Verify", "the", "signature", "for", "message", "m", "pubkey", "Y", "signature", "(", "r", "s", ")", "r", "=", "xcoord", "(", "R", ")", "verify", "that", ":", "G", "*", "m", "+", "Y", "*", "r", "=", "R", "*", "s", "this", "is", "true", "because", ":", "{", "Y", "=", "G", "*", "x", "and", "R", "=", "G", "*", "k", "s", "=", "(", "m", "+", "x", "*", "r", ")", "/", "k", "}" ]
python
train
31.033333
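The final check R.x == r in verify is exactly the textbook ECDSA identity spelled out in the docstring. Writing the substitution out once (all scalar arithmetic modulo the group order, with G the base point, Y = G*x the public key, and R = G*k the ephemeral point):

\[
R' \;=\; \frac{m}{s}\,G \;+\; \frac{r}{s}\,Y
    \;=\; \frac{m + x r}{s}\,G
    \;=\; k\,G \;=\; R ,
\qquad\text{since } s = \frac{m + x r}{k},
\]

so \(\operatorname{xcoord}(R') = \operatorname{xcoord}(R) = r\) whenever the signature is valid.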
HazyResearch/pdftotree
pdftotree/utils/pdf/vector_utils.py
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L106-L114
def bound_bboxes(bboxes): """ Finds the minimal bbox that contains all given bboxes """ group_x0 = min(map(lambda l: l[x0], bboxes)) group_y0 = min(map(lambda l: l[y0], bboxes)) group_x1 = max(map(lambda l: l[x1], bboxes)) group_y1 = max(map(lambda l: l[y1], bboxes)) return (group_x0, group_y0, group_x1, group_y1)
[ "def", "bound_bboxes", "(", "bboxes", ")", ":", "group_x0", "=", "min", "(", "map", "(", "lambda", "l", ":", "l", "[", "x0", "]", ",", "bboxes", ")", ")", "group_y0", "=", "min", "(", "map", "(", "lambda", "l", ":", "l", "[", "y0", "]", ",", "bboxes", ")", ")", "group_x1", "=", "max", "(", "map", "(", "lambda", "l", ":", "l", "[", "x1", "]", ",", "bboxes", ")", ")", "group_y1", "=", "max", "(", "map", "(", "lambda", "l", ":", "l", "[", "y1", "]", ",", "bboxes", ")", ")", "return", "(", "group_x0", ",", "group_y0", ",", "group_x1", ",", "group_y1", ")" ]
Finds the minimal bbox that contains all given bboxes
[ "Finds", "the", "minimal", "bbox", "that", "contains", "all", "given", "bboxes" ]
python
train
37.666667
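bound_bboxes leans on module-level index constants x0, y0, x1, y1 that sit outside this record. Assuming the conventional (0, 1, 2, 3) ordering, a self-contained sketch plus a worked call:

# Assumed index constants; in pdftotree they are defined at module level.
x0, y0, x1, y1 = 0, 1, 2, 3

def bound_bboxes(bboxes):
    # Minimal covering box: min over lower-left corners, max over upper-right.
    return (min(b[x0] for b in bboxes),
            min(b[y0] for b in bboxes),
            max(b[x1] for b in bboxes),
            max(b[y1] for b in bboxes))

print(bound_bboxes([(0, 0, 2, 2), (1, 1, 3, 4)]))  # (0, 0, 3, 4)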
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1908-L1913
def SegmentMax(a, ids): """ Segmented max op. """ func = lambda idxs: np.amax(a[idxs], axis=0) return seg_map(func, a, ids),
[ "def", "SegmentMax", "(", "a", ",", "ids", ")", ":", "func", "=", "lambda", "idxs", ":", "np", ".", "amax", "(", "a", "[", "idxs", "]", ",", "axis", "=", "0", ")", "return", "seg_map", "(", "func", ",", "a", ",", "ids", ")", "," ]
Segmented max op.
[ "Segmented", "max", "op", "." ]
python
train
23.166667
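seg_map is a tfdeploy helper that is not part of this record; behaviourally, SegmentMax is a plain segmented reduction over sorted segment ids (and the trailing comma is deliberate, since tfdeploy ops return tuples of outputs). A NumPy-only sketch of the same semantics:

import numpy as np

def segment_max(a, ids):
    # For each distinct id, take the elementwise max over the rows of `a`
    # belonging to that segment; ids are assumed sorted, as in TensorFlow.
    a, ids = np.asarray(a), np.asarray(ids)
    return np.stack([np.amax(a[ids == i], axis=0) for i in np.unique(ids)])

print(segment_max([1, 3, 2, 5, 4], [0, 0, 1, 1, 1]))  # [3 5]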
djgagne/hagelslag
hagelslag/processing/EnsembleProducts.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnsembleProducts.py#L631-L721
def load_data(self, grid_method="gamma", num_samples=1000, condition_threshold=0.5, zero_inflate=False,
              percentile=None):
        """
        Reads the track forecasts and converts them to grid point values based on random sampling.

        Args:
            grid_method: "gamma" by default
            num_samples: Number of samples drawn from predicted pdf
            condition_threshold: Objects are not written to the grid if condition model probability is below this
                threshold.
            zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability
            percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified
                percentile from 0 to 100.

        Returns:
            0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1.
        """
        self.percentile = percentile
        if self.track_forecasts == {}:
            self.load_track_forecasts()
        if self.track_forecasts == {}:
            return -1
        if self.data is None:
            self.data = np.zeros((len(self.members), self.times.size,
                                  self.grid_shape[0], self.grid_shape[1]), dtype=np.float32)
        else:
            self.data[:] = 0
        if grid_method in ["mean", "median", "samples"]:
            for m, member in enumerate(self.members):
                print("Sampling " + member)
                for track_forecast in self.track_forecasts[member]:
                    times = track_forecast["properties"]["times"]
                    for s, step in enumerate(track_forecast["features"]):
                        forecast_pdf = np.array(step['properties'][self.variable + "_" +
                                                                   self.ensemble_name.replace(" ", "-")])
                        forecast_time = self.run_date + timedelta(hours=times[s])
                        t = np.where(self.times == forecast_time)[0][0]
                        mask = np.array(step['properties']["masks"], dtype=int)
                        i = np.array(step['properties']["i"], dtype=int)
                        i = i[mask == 1]
                        j = np.array(step['properties']["j"], dtype=int)
                        j = j[mask == 1]
                        if grid_method == "samples":
                            intensities = np.array(step["properties"]["timesteps"], dtype=float)[mask == 1]
                            rankings = np.argsort(intensities)
                            samples = np.random.choice(self.forecast_bins, size=intensities.size, replace=True,
                                                       p=forecast_pdf)
                            self.data[m, t, i[rankings], j[rankings]] = samples
                        else:
                            if grid_method == "mean":
                                forecast_value = np.sum(forecast_pdf * self.forecast_bins)
                            elif grid_method == "median":
                                forecast_cdf = np.cumsum(forecast_pdf)
                                forecast_value = self.forecast_bins[np.argmin(np.abs(forecast_cdf - 0.5))]
                            else:
                                forecast_value = 0
                            self.data[m, t, i, j] = forecast_value
        if grid_method in ["gamma"]:
            full_condition_name = "condition_" + self.condition_model_name.replace(" ", "-")
            dist_model_name = self.variable + "_" + self.ensemble_name.replace(" ", "-")
            for m, member in enumerate(self.members):
                for track_forecast in self.track_forecasts[member]:
                    times = track_forecast["properties"]["times"]
                    for s, step in enumerate(track_forecast["features"]):
                        forecast_params = step["properties"][dist_model_name]
                        if self.condition_model_name is not None:
                            condition = step["properties"][full_condition_name]
                        else:
                            condition = None
                        forecast_time = self.run_date + timedelta(hours=times[s])
                        if forecast_time in self.times:
                            t = np.where(self.times == forecast_time)[0][0]
                            mask = np.array(step["properties"]["masks"], dtype=int)
                            rankings = np.argsort(step["properties"]["timesteps"])[mask == 1]
                            i = np.array(step["properties"]["i"], dtype=int)[mask == 1][rankings]
                            j = np.array(step["properties"]["j"], dtype=int)[mask == 1][rankings]
                            if rankings.size > 0:
                                raw_samples = np.sort(gamma.rvs(forecast_params[0], loc=forecast_params[1],
                                                                scale=forecast_params[2],
                                                                size=(num_samples, rankings.size)), axis=1)
                                if zero_inflate:
                                    raw_samples *= bernoulli.rvs(condition, size=(num_samples, rankings.size))
                                if percentile is None:
                                    samples = raw_samples.mean(axis=0)
                                else:
                                    samples = np.percentile(raw_samples, percentile, axis=0)
                                if condition is None or condition >= condition_threshold:
                                    self.data[m, t, i, j] = samples
        return 0
[ "def", "load_data", "(", "self", ",", "grid_method", "=", "\"gamma\"", ",", "num_samples", "=", "1000", ",", "condition_threshold", "=", "0.5", ",", "zero_inflate", "=", "False", ",", "percentile", "=", "None", ")", ":", "self", ".", "percentile", "=", "percentile", "if", "self", ".", "track_forecasts", "==", "{", "}", ":", "self", ".", "load_track_forecasts", "(", ")", "if", "self", ".", "track_forecasts", "==", "{", "}", ":", "return", "-", "1", "if", "self", ".", "data", "is", "None", ":", "self", ".", "data", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "members", ")", ",", "self", ".", "times", ".", "size", ",", "self", ".", "grid_shape", "[", "0", "]", ",", "self", ".", "grid_shape", "[", "1", "]", ")", ",", "dtype", "=", "np", ".", "float32", ")", "else", ":", "self", ".", "data", "[", ":", "]", "=", "0", "if", "grid_method", "in", "[", "\"mean\"", ",", "\"median\"", ",", "\"samples\"", "]", ":", "for", "m", ",", "member", "in", "enumerate", "(", "self", ".", "members", ")", ":", "print", "(", "\"Sampling \"", "+", "member", ")", "for", "track_forecast", "in", "self", ".", "track_forecasts", "[", "member", "]", ":", "times", "=", "track_forecast", "[", "\"properties\"", "]", "[", "\"times\"", "]", "for", "s", ",", "step", "in", "enumerate", "(", "track_forecast", "[", "\"features\"", "]", ")", ":", "forecast_pdf", "=", "np", ".", "array", "(", "step", "[", "'properties'", "]", "[", "self", ".", "variable", "+", "\"_\"", "+", "self", ".", "ensemble_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "]", ")", "forecast_time", "=", "self", ".", "run_date", "+", "timedelta", "(", "hours", "=", "times", "[", "s", "]", ")", "t", "=", "np", ".", "where", "(", "self", ".", "times", "==", "forecast_time", ")", "[", "0", "]", "[", "0", "]", "mask", "=", "np", ".", "array", "(", "step", "[", "'properties'", "]", "[", "\"masks\"", "]", ",", "dtype", "=", "int", ")", "i", "=", "np", ".", "array", "(", "step", "[", "'properties'", "]", "[", "\"i\"", "]", ",", "dtype", "=", "int", ")", "i", "=", "i", "[", "mask", "==", "1", "]", "j", "=", "np", ".", "array", "(", "step", "[", "'properties'", "]", "[", "\"j\"", "]", ",", "dtype", "=", "int", ")", "j", "=", "j", "[", "mask", "==", "1", "]", "if", "grid_method", "==", "\"samples\"", ":", "intensities", "=", "np", ".", "array", "(", "step", "[", "\"properties\"", "]", "[", "\"timesteps\"", "]", ",", "dtype", "=", "float", ")", "[", "mask", "==", "1", "]", "rankings", "=", "np", ".", "argsort", "(", "intensities", ")", "samples", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "forecast_bins", ",", "size", "=", "intensities", ".", "size", ",", "replace", "=", "True", ",", "p", "=", "forecast_pdf", ")", "self", ".", "data", "[", "m", ",", "t", ",", "i", "[", "rankings", "]", ",", "j", "[", "rankings", "]", "]", "=", "samples", "else", ":", "if", "grid_method", "==", "\"mean\"", ":", "forecast_value", "=", "np", ".", "sum", "(", "forecast_pdf", "*", "self", ".", "forecast_bins", ")", "elif", "grid_method", "==", "\"median\"", ":", "forecast_cdf", "=", "np", ".", "cumsum", "(", "forecast_pdf", ")", "forecast_value", "=", "self", ".", "forecast_bins", "[", "np", ".", "argmin", "(", "np", ".", "abs", "(", "forecast_cdf", "-", "0.5", ")", ")", "]", "else", ":", "forecast_value", "=", "0", "self", ".", "data", "[", "m", ",", "t", ",", "i", ",", "j", "]", "=", "forecast_value", "if", "grid_method", "in", "[", "\"gamma\"", "]", ":", "full_condition_name", "=", "\"condition_\"", "+", "self", ".", "condition_model_name", ".", 
"replace", "(", "\" \"", ",", "\"-\"", ")", "dist_model_name", "=", "self", ".", "variable", "+", "\"_\"", "+", "self", ".", "ensemble_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "for", "m", ",", "member", "in", "enumerate", "(", "self", ".", "members", ")", ":", "for", "track_forecast", "in", "self", ".", "track_forecasts", "[", "member", "]", ":", "times", "=", "track_forecast", "[", "\"properties\"", "]", "[", "\"times\"", "]", "for", "s", ",", "step", "in", "enumerate", "(", "track_forecast", "[", "\"features\"", "]", ")", ":", "forecast_params", "=", "step", "[", "\"properties\"", "]", "[", "dist_model_name", "]", "if", "self", ".", "condition_model_name", "is", "not", "None", ":", "condition", "=", "step", "[", "\"properties\"", "]", "[", "full_condition_name", "]", "else", ":", "condition", "=", "None", "forecast_time", "=", "self", ".", "run_date", "+", "timedelta", "(", "hours", "=", "times", "[", "s", "]", ")", "if", "forecast_time", "in", "self", ".", "times", ":", "t", "=", "np", ".", "where", "(", "self", ".", "times", "==", "forecast_time", ")", "[", "0", "]", "[", "0", "]", "mask", "=", "np", ".", "array", "(", "step", "[", "\"properties\"", "]", "[", "\"masks\"", "]", ",", "dtype", "=", "int", ")", "rankings", "=", "np", ".", "argsort", "(", "step", "[", "\"properties\"", "]", "[", "\"timesteps\"", "]", ")", "[", "mask", "==", "1", "]", "i", "=", "np", ".", "array", "(", "step", "[", "\"properties\"", "]", "[", "\"i\"", "]", ",", "dtype", "=", "int", ")", "[", "mask", "==", "1", "]", "[", "rankings", "]", "j", "=", "np", ".", "array", "(", "step", "[", "\"properties\"", "]", "[", "\"j\"", "]", ",", "dtype", "=", "int", ")", "[", "mask", "==", "1", "]", "[", "rankings", "]", "if", "rankings", ".", "size", ">", "0", ":", "raw_samples", "=", "np", ".", "sort", "(", "gamma", ".", "rvs", "(", "forecast_params", "[", "0", "]", ",", "loc", "=", "forecast_params", "[", "1", "]", ",", "scale", "=", "forecast_params", "[", "2", "]", ",", "size", "=", "(", "num_samples", ",", "rankings", ".", "size", ")", ")", ",", "axis", "=", "1", ")", "if", "zero_inflate", ":", "raw_samples", "*=", "bernoulli", ".", "rvs", "(", "condition", ",", "size", "=", "(", "num_samples", ",", "rankings", ".", "size", ")", ")", "if", "percentile", "is", "None", ":", "samples", "=", "raw_samples", ".", "mean", "(", "axis", "=", "0", ")", "else", ":", "samples", "=", "np", ".", "percentile", "(", "raw_samples", ",", "percentile", ",", "axis", "=", "0", ")", "if", "condition", "is", "None", "or", "condition", ">=", "condition_threshold", ":", "self", ".", "data", "[", "m", ",", "t", ",", "i", ",", "j", "]", "=", "samples", "return", "0" ]
Reads the track forecasts and converts them to grid point values based on random sampling. Args: grid_method: "gamma" by default num_samples: Number of samples drawn from predicted pdf condition_threshold: Objects are not written to the grid if condition model probability is below this threshold. zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified percentile from 0 to 100. Returns: 0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1.
[ "Reads", "the", "track", "forecasts", "and", "converts", "them", "to", "grid", "point", "values", "based", "on", "random", "sampling", "." ]
python
train
63.153846
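The 'gamma' branch is the subtle one: per object it draws num_samples gamma variates for every covered grid cell, sorts along the cell axis so large draws land on the highest-ranked cells, optionally zero-inflates with Bernoulli draws from the condition probability, then collapses the sample axis. A stripped-down sketch of just that sampling step; the shape/loc/scale values and the condition probability are illustrative, not taken from the project:

import numpy as np
from scipy.stats import bernoulli, gamma

num_samples, n_cells = 1000, 4
shape, loc, scale = 2.0, 0.0, 10.0  # illustrative gamma parameters
condition = 0.7                     # illustrative condition probability

# Sort along the cell axis so larger samples map to higher-ranked cells.
raw = np.sort(gamma.rvs(shape, loc=loc, scale=scale,
                        size=(num_samples, n_cells)), axis=1)
raw = raw * bernoulli.rvs(condition, size=(num_samples, n_cells))  # zero-inflation

mean_field = raw.mean(axis=0)               # percentile=None path
p90_field = np.percentile(raw, 90, axis=0)  # explicit-percentile path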
ultrabug/py3status
py3status/util.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/util.py#L18-L26
def hex_2_rgb(self, color): """ convert a hex color to rgb """ if not self.RE_HEX.match(color): color = "#FFF" if len(color) == 7: return (int(color[i : i + 2], 16) / 255 for i in [1, 3, 5]) return (int(c, 16) / 15 for c in color[1:])
[ "def", "hex_2_rgb", "(", "self", ",", "color", ")", ":", "if", "not", "self", ".", "RE_HEX", ".", "match", "(", "color", ")", ":", "color", "=", "\"#FFF\"", "if", "len", "(", "color", ")", "==", "7", ":", "return", "(", "int", "(", "color", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "/", "255", "for", "i", "in", "[", "1", ",", "3", ",", "5", "]", ")", "return", "(", "int", "(", "c", ",", "16", ")", "/", "15", "for", "c", "in", "color", ")" ]
convert a hex color to rgb
[ "convert", "a", "hex", "color", "to", "rgb" ]
python
train
32.666667
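hex_2_rgb is a method and RE_HEX lives elsewhere on the class, so the sketch below substitutes a plausible pattern to make the two branches runnable: 7-character #RRGGBB pairs scaled by 255, and the short #RGB form with single digits scaled by 15, skipping the leading '#'. Both calls below come out identical:

import re

RE_HEX = re.compile(r"#[0-9a-fA-F]{3}(?:[0-9a-fA-F]{3})?$")  # assumed pattern

def hex_2_rgb(color):
    if not RE_HEX.match(color):
        color = "#FFF"  # fall back to white
    if len(color) == 7:  # "#RRGGBB"
        return (int(color[i:i + 2], 16) / 255 for i in [1, 3, 5])
    return (int(c, 16) / 15 for c in color[1:])  # "#RGB", skipping '#'

print(list(hex_2_rgb("#336699")))  # [0.2, 0.4, 0.6]
print(list(hex_2_rgb("#369")))     # [0.2, 0.4, 0.6]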
google/prettytensor
prettytensor/layers.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/layers.py#L31-L72
def apply_activation( books, x, activation, activation_args=(), activation_kwargs=None): """Returns activation(x, *activation_args, **activation_kwargs). This applies the given activation and adds useful summaries specific to the activation. Args: books: The bookkeeper. x: The tensor to apply activation to. activation: An activation function. activation_args: Optional additional arguments for the activation. activation_kwargs: Optional keyword args for activation. Returns: A tensor with activation applied to x. """ if activation is None: return x if activation_kwargs is None: activation_kwargs = {} y = activation(x, *activation_args, **activation_kwargs) if activation in (tf.nn.relu, functions.leaky_relu, functions.softplus): books.add_scalar_summary( tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)), '%s/zeros' % y.op.name) elif activation is tf.nn.relu6: books.add_scalar_summary( tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)), '%s/zeros' % y.op.name) books.add_scalar_summary( tf.reduce_mean(tf.cast(tf.greater(x, 6.0), tf.float32)), '%s/sixes' % y.op.name) elif activation in (functions.l2_normalize, tf.nn.l2_normalize, functions.l1_normalize): books.add_scalar_summary( tf.reduce_mean(tf.sqrt(tf.reduce_sum( tf.square(x), 1))), '%s/length' % y.op.name) return y
[ "def", "apply_activation", "(", "books", ",", "x", ",", "activation", ",", "activation_args", "=", "(", ")", ",", "activation_kwargs", "=", "None", ")", ":", "if", "activation", "is", "None", ":", "return", "x", "if", "activation_kwargs", "is", "None", ":", "activation_kwargs", "=", "{", "}", "y", "=", "activation", "(", "x", ",", "*", "activation_args", ",", "*", "*", "activation_kwargs", ")", "if", "activation", "in", "(", "tf", ".", "nn", ".", "relu", ",", "functions", ".", "leaky_relu", ",", "functions", ".", "softplus", ")", ":", "books", ".", "add_scalar_summary", "(", "tf", ".", "reduce_mean", "(", "tf", ".", "cast", "(", "tf", ".", "less", "(", "x", ",", "0.0", ")", ",", "tf", ".", "float32", ")", ")", ",", "'%s/zeros'", "%", "y", ".", "op", ".", "name", ")", "elif", "activation", "is", "tf", ".", "nn", ".", "relu6", ":", "books", ".", "add_scalar_summary", "(", "tf", ".", "reduce_mean", "(", "tf", ".", "cast", "(", "tf", ".", "less", "(", "x", ",", "0.0", ")", ",", "tf", ".", "float32", ")", ")", ",", "'%s/zeros'", "%", "y", ".", "op", ".", "name", ")", "books", ".", "add_scalar_summary", "(", "tf", ".", "reduce_mean", "(", "tf", ".", "cast", "(", "tf", ".", "greater", "(", "x", ",", "6.0", ")", ",", "tf", ".", "float32", ")", ")", ",", "'%s/sixes'", "%", "y", ".", "op", ".", "name", ")", "elif", "activation", "in", "(", "functions", ".", "l2_normalize", ",", "tf", ".", "nn", ".", "l2_normalize", ",", "functions", ".", "l1_normalize", ")", ":", "books", ".", "add_scalar_summary", "(", "tf", ".", "reduce_mean", "(", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "x", ")", ",", "1", ")", ")", ")", ",", "'%s/length'", "%", "y", ".", "op", ".", "name", ")", "return", "y" ]
Returns activation(x, *activation_args, **activation_kwargs). This applies the given activation and adds useful summaries specific to the activation. Args: books: The bookkeeper. x: The tensor to apply activation to. activation: An activation function. activation_args: Optional additional arguments for the activation. activation_kwargs: Optional keyword args for activation. Returns: A tensor with activation applied to x.
[ "Returns", "activation", "(", "x", "*", "activation_args", "**", "activation_kwargs", ")", "." ]
python
train
34.142857
contentful-labs/contentful.py
contentful/cda/serialization.py
https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L226-L242
def create_array(self, json): """Create :class:`.resources.Array` from JSON. :param json: JSON dict. :return: Array instance. """ result = Array(json['sys']) result.total = json['total'] result.skip = json['skip'] result.limit = json['limit'] result.items = [] result.items_mapped = {'Asset': {}, 'Entry': {}} self.process_array_items(result, json) self.process_array_includes(result, json) return result
[ "def", "create_array", "(", "self", ",", "json", ")", ":", "result", "=", "Array", "(", "json", "[", "'sys'", "]", ")", "result", ".", "total", "=", "json", "[", "'total'", "]", "result", ".", "skip", "=", "json", "[", "'skip'", "]", "result", ".", "limit", "=", "json", "[", "'limit'", "]", "result", ".", "items", "=", "[", "]", "result", ".", "items_mapped", "=", "{", "'Asset'", ":", "{", "}", ",", "'Entry'", ":", "{", "}", "}", "self", ".", "process_array_items", "(", "result", ",", "json", ")", "self", ".", "process_array_includes", "(", "result", ",", "json", ")", "return", "result" ]
Create :class:`.resources.Array` from JSON. :param json: JSON dict. :return: Array instance.
[ "Create", ":", "class", ":", ".", "resources", ".", "Array", "from", "JSON", "." ]
python
train
29.117647
openstack/proliantutils
proliantutils/hpssa/objects.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L428-L449
def _get_erase_command(self, drive, pattern): """Return the command arguments based on the pattern. Erase command examples: 1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase erasepattern=overwrite unrestricted=off forced" 2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase erasepattern=zero forced" :param drive: A string with comma separated list of drives. :param pattern: A string which defines the type of erase. :returns: A list of ssacli command arguments. """ cmd_args = [] cmd_args.append("pd %s" % drive) cmd_args.extend(['modify', 'erase', pattern]) if pattern != 'erasepattern=zero': cmd_args.append('unrestricted=off') cmd_args.append('forced') return cmd_args
[ "def", "_get_erase_command", "(", "self", ",", "drive", ",", "pattern", ")", ":", "cmd_args", "=", "[", "]", "cmd_args", ".", "append", "(", "\"pd %s\"", "%", "drive", ")", "cmd_args", ".", "extend", "(", "[", "'modify'", ",", "'erase'", ",", "pattern", "]", ")", "if", "pattern", "!=", "'erasepattern=zero'", ":", "cmd_args", ".", "append", "(", "'unrestricted=off'", ")", "cmd_args", ".", "append", "(", "'forced'", ")", "return", "cmd_args" ]
Return the command arguments based on the pattern. Erase command examples: 1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase erasepattern=overwrite unrestricted=off forced" 2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase erasepattern=zero forced" :param drive: A string with comma separated list of drives. :param pattern: A string which defines the type of erase. :returns: A list of ssacli command arguments.
[ "Return", "the", "command", "arguments", "based", "on", "the", "pattern", "." ]
python
train
37.636364
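The two docstring examples pin down the branch logic exactly: only the zero-fill pattern skips 'unrestricted=off'. A standalone sketch (dropping the unused self) that reproduces both argument lists:

def get_erase_command(drive, pattern):
    # Every pattern except zero-fill also needs 'unrestricted=off'.
    cmd_args = ["pd %s" % drive, "modify", "erase", pattern]
    if pattern != "erasepattern=zero":
        cmd_args.append("unrestricted=off")
    cmd_args.append("forced")
    return cmd_args

print(get_erase_command("1I:1:1", "erasepattern=zero"))
# ['pd 1I:1:1', 'modify', 'erase', 'erasepattern=zero', 'forced']
print(get_erase_command("1I:1:1", "erasepattern=overwrite"))
# ['pd 1I:1:1', 'modify', 'erase', 'erasepattern=overwrite',
#  'unrestricted=off', 'forced']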
log2timeline/plaso
plaso/parsers/docker.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/docker.py#L94-L108
def _GetIdentifierFromPath(self, parser_mediator): """Extracts a container or a graph ID from a JSON file's path. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. Returns: str: container or graph identifier. """ file_entry = parser_mediator.GetFileEntry() path = file_entry.path_spec.location file_system = file_entry.GetFileSystem() path_segments = file_system.SplitPath(path) return path_segments[-2]
[ "def", "_GetIdentifierFromPath", "(", "self", ",", "parser_mediator", ")", ":", "file_entry", "=", "parser_mediator", ".", "GetFileEntry", "(", ")", "path", "=", "file_entry", ".", "path_spec", ".", "location", "file_system", "=", "file_entry", ".", "GetFileSystem", "(", ")", "path_segments", "=", "file_system", ".", "SplitPath", "(", "path", ")", "return", "path_segments", "[", "-", "2", "]" ]
Extracts a container or a graph ID from a JSON file's path. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. Returns: str: container or graph identifier.
[ "Extracts", "a", "container", "or", "a", "graph", "ID", "from", "a", "JSON", "file", "s", "path", "." ]
python
train
35.066667
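Stripped of the dfVFS plumbing, the method reduces to taking the next-to-last path segment, which in Docker's on-disk layout is the directory named after the container or graph object. A plain-Python sketch against a hypothetical path:

def identifier_from_path(path):
    # The identifier is the directory holding the JSON file,
    # i.e. the second-to-last segment of the path.
    return path.split("/")[-2]

# Hypothetical Docker container config path.
print(identifier_from_path(
    "/var/lib/docker/containers/d45ea0a7/config.json"))  # d45ea0a7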
RudolfCardinal/pythonlib
cardinal_pythonlib/fileops.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L438-L468
def exists_locked(filepath: str) -> Tuple[bool, bool]: """ Checks if a file is locked by opening it in append mode. (If no exception is thrown in that situation, then the file is not locked.) Args: filepath: file to check Returns: tuple: ``(exists, locked)`` See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/. """ exists = False locked = None file_object = None if os.path.exists(filepath): exists = True locked = True try: buffer_size = 8 # Opening file in append mode and read the first 8 characters. file_object = open(filepath, 'a', buffer_size) if file_object: locked = False # exists and not locked except IOError: pass finally: if file_object: file_object.close() return exists, locked
[ "def", "exists_locked", "(", "filepath", ":", "str", ")", "->", "Tuple", "[", "bool", ",", "bool", "]", ":", "exists", "=", "False", "locked", "=", "None", "file_object", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "exists", "=", "True", "locked", "=", "True", "try", ":", "buffer_size", "=", "8", "# Opening file in append mode and read the first 8 characters.", "file_object", "=", "open", "(", "filepath", ",", "'a'", ",", "buffer_size", ")", "if", "file_object", ":", "locked", "=", "False", "# exists and not locked", "except", "IOError", ":", "pass", "finally", ":", "if", "file_object", ":", "file_object", ".", "close", "(", ")", "return", "exists", ",", "locked" ]
Checks if a file is locked by opening it in append mode. (If no exception is thrown in that situation, then the file is not locked.) Args: filepath: file to check Returns: tuple: ``(exists, locked)`` See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/.
[ "Checks", "if", "a", "file", "is", "locked", "by", "opening", "it", "in", "append", "mode", ".", "(", "If", "no", "exception", "is", "thrown", "in", "that", "situation", "then", "the", "file", "is", "not", "locked", ".", ")" ]
python
train
28.903226
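A short usage sketch covering the cases worth remembering: for a missing file the result is (False, None), so the locked element should be checked explicitly rather than treated as a plain boolean; for an ordinary writable file it is (True, False). Run on a POSIX system:

import os
import tempfile

from cardinal_pythonlib.fileops import exists_locked

print(exists_locked("/no/such/file"))  # (False, None)

fd, path = tempfile.mkstemp()
os.close(fd)
print(exists_locked(path))             # (True, False) for a writable file
os.remove(path)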
annoviko/pyclustering
pyclustering/cluster/kmeans.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/kmeans.py#L544-L557
def __calculate_dataset_difference(self, amount_clusters): """! @brief Calculate distance from each point to each cluster center. """ dataset_differences = numpy.zeros((amount_clusters, len(self.__pointer_data))) for index_center in range(amount_clusters): if self.__metric.get_type() != type_metric.USER_DEFINED: dataset_differences[index_center] = self.__metric(self.__pointer_data, self.__centers[index_center]) else: dataset_differences[index_center] = [ self.__metric(point, self.__centers[index_center]) for point in self.__pointer_data ] return dataset_differences
[ "def", "__calculate_dataset_difference", "(", "self", ",", "amount_clusters", ")", ":", "dataset_differences", "=", "numpy", ".", "zeros", "(", "(", "amount_clusters", ",", "len", "(", "self", ".", "__pointer_data", ")", ")", ")", "for", "index_center", "in", "range", "(", "amount_clusters", ")", ":", "if", "self", ".", "__metric", ".", "get_type", "(", ")", "!=", "type_metric", ".", "USER_DEFINED", ":", "dataset_differences", "[", "index_center", "]", "=", "self", ".", "__metric", "(", "self", ".", "__pointer_data", ",", "self", ".", "__centers", "[", "index_center", "]", ")", "else", ":", "dataset_differences", "[", "index_center", "]", "=", "[", "self", ".", "__metric", "(", "point", ",", "self", ".", "__centers", "[", "index_center", "]", ")", "for", "point", "in", "self", ".", "__pointer_data", "]", "return", "dataset_differences" ]
! @brief Calculate distance from each point to each cluster center.
[ "!" ]
python
valid
52.214286
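The result is an (amount_clusters, n_points) matrix: one row of point-to-center distances per center, computed in a single vectorised call when the metric supports array input. A NumPy-only sketch with Euclidean distance standing in for the pyclustering metric object:

import numpy as np

def dataset_difference(points, centers):
    # One row per center: distance from that center to every point.
    points = np.asarray(points, dtype=float)
    centers = np.asarray(centers, dtype=float)
    diff = np.zeros((len(centers), len(points)))
    for k, center in enumerate(centers):
        diff[k] = np.linalg.norm(points - center, axis=1)  # vectorised metric
    return diff

print(dataset_difference([[0, 0], [3, 4], [6, 8]], [[0, 0], [6, 8]]))
# [[ 0.  5. 10.]
#  [10.  5.  0.]]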
inasafe/inasafe
safe/report/extractors/analysis_provenance_details.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/extractors/analysis_provenance_details.py#L228-L339
def analysis_provenance_details_simplified_extractor( impact_report, component_metadata): """Extracting simplified version of provenance details of layers. This extractor will produce provenance details which will be displayed in the main report. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.0 """ context = {} extra_args = component_metadata.extra_args default_source = resolve_from_dictionary( extra_args, ['defaults', 'source']) default_reference = resolve_from_dictionary( extra_args, ['defaults', 'reference']) provenance_format_args = resolve_from_dictionary( extra_args, 'provenance_format') hazard_keywords = impact_report.impact_function.provenance[ 'hazard_keywords'] header = resolve_from_dictionary( provenance_format_args, 'hazard_header') provenance_format = resolve_from_dictionary( provenance_format_args, 'hazard_format') hazard_provenance = { 'header': header, 'provenance': provenance_format.format( layer_name=hazard_keywords.get('title'), source=QgsDataSourceUri.removePassword( decode_full_layer_uri(hazard_keywords.get('source'))[0] or default_source)) } exposure_keywords = impact_report.impact_function.provenance[ 'exposure_keywords'] header = resolve_from_dictionary( provenance_format_args, 'exposure_header') provenance_format = resolve_from_dictionary( provenance_format_args, 'exposure_format') exposure_provenance = { 'header': header, 'provenance': provenance_format.format( layer_name=exposure_keywords.get('title'), source=QgsDataSourceUri.removePassword( decode_full_layer_uri(exposure_keywords.get('source'))[0] or default_source)) } aggregation_keywords = impact_report.impact_function.provenance[ 'aggregation_keywords'] header = resolve_from_dictionary( provenance_format_args, 'aggregation_header') provenance_format = resolve_from_dictionary( provenance_format_args, 'aggregation_format') # only if aggregation layer used if aggregation_keywords: provenance_string = provenance_format.format( layer_name=aggregation_keywords.get('title'), source=QgsDataSourceUri.removePassword( decode_full_layer_uri(aggregation_keywords.get('source'))[0] or default_source)) else: aggregation_not_used = resolve_from_dictionary( extra_args, ['defaults', 'aggregation_not_used']) provenance_string = aggregation_not_used aggregation_provenance = { 'header': header, 'provenance': provenance_string } impact_function_name = impact_report.impact_function.name header = resolve_from_dictionary( provenance_format_args, 'impact_function_header') provenance_format = resolve_from_dictionary( provenance_format_args, 'impact_function_format') impact_function_provenance = { 'header': header, 'provenance': provenance_format.format( impact_function_name=impact_function_name, reference=default_reference) } provenance_detail = OrderedDict() provenance_detail['hazard'] = hazard_provenance provenance_detail['exposure'] = exposure_provenance provenance_detail['aggregation'] = aggregation_provenance provenance_detail['impact_function'] = impact_function_provenance analysis_details_header = resolve_from_dictionary( extra_args, ['header', 'analysis_detail']) context['component_key'] = component_metadata.key context.update({ 'header': analysis_details_header, 'details': provenance_detail }) return context
[ "def", "analysis_provenance_details_simplified_extractor", "(", "impact_report", ",", "component_metadata", ")", ":", "context", "=", "{", "}", "extra_args", "=", "component_metadata", ".", "extra_args", "default_source", "=", "resolve_from_dictionary", "(", "extra_args", ",", "[", "'defaults'", ",", "'source'", "]", ")", "default_reference", "=", "resolve_from_dictionary", "(", "extra_args", ",", "[", "'defaults'", ",", "'reference'", "]", ")", "provenance_format_args", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'provenance_format'", ")", "hazard_keywords", "=", "impact_report", ".", "impact_function", ".", "provenance", "[", "'hazard_keywords'", "]", "header", "=", "resolve_from_dictionary", "(", "provenance_format_args", ",", "'hazard_header'", ")", "provenance_format", "=", "resolve_from_dictionary", "(", "provenance_format_args", ",", "'hazard_format'", ")", "hazard_provenance", "=", "{", "'header'", ":", "header", ",", "'provenance'", ":", "provenance_format", ".", "format", "(", "layer_name", "=", "hazard_keywords", ".", "get", "(", "'title'", ")", ",", "source", "=", "QgsDataSourceUri", ".", "removePassword", "(", "decode_full_layer_uri", "(", "hazard_keywords", ".", "get", "(", "'source'", ")", ")", "[", "0", "]", "or", "default_source", ")", ")", "}", "exposure_keywords", "=", "impact_report", ".", "impact_function", ".", "provenance", "[", "'exposure_keywords'", "]", "header", "=", "resolve_from_dictionary", "(", "provenance_format_args", ",", "'exposure_header'", ")", "provenance_format", "=", "resolve_from_dictionary", "(", "provenance_format_args", ",", "'exposure_format'", ")", "exposure_provenance", "=", "{", "'header'", ":", "header", ",", "'provenance'", ":", "provenance_format", ".", "format", "(", "layer_name", "=", "exposure_keywords", ".", "get", "(", "'title'", ")", ",", "source", "=", "QgsDataSourceUri", ".", "removePassword", "(", "decode_full_layer_uri", "(", "exposure_keywords", ".", "get", "(", "'source'", ")", ")", "[", "0", "]", "or", "default_source", ")", ")", "}", "aggregation_keywords", "=", "impact_report", ".", "impact_function", ".", "provenance", "[", "'aggregation_keywords'", "]", "header", "=", "resolve_from_dictionary", "(", "provenance_format_args", ",", "'aggregation_header'", ")", "provenance_format", "=", "resolve_from_dictionary", "(", "provenance_format_args", ",", "'aggregation_format'", ")", "# only if aggregation layer used", "if", "aggregation_keywords", ":", "provenance_string", "=", "provenance_format", ".", "format", "(", "layer_name", "=", "aggregation_keywords", ".", "get", "(", "'title'", ")", ",", "source", "=", "QgsDataSourceUri", ".", "removePassword", "(", "decode_full_layer_uri", "(", "aggregation_keywords", ".", "get", "(", "'source'", ")", ")", "[", "0", "]", "or", "default_source", ")", ")", "else", ":", "aggregation_not_used", "=", "resolve_from_dictionary", "(", "extra_args", ",", "[", "'defaults'", ",", "'aggregation_not_used'", "]", ")", "provenance_string", "=", "aggregation_not_used", "aggregation_provenance", "=", "{", "'header'", ":", "header", ",", "'provenance'", ":", "provenance_string", "}", "impact_function_name", "=", "impact_report", ".", "impact_function", ".", "name", "header", "=", "resolve_from_dictionary", "(", "provenance_format_args", ",", "'impact_function_header'", ")", "provenance_format", "=", "resolve_from_dictionary", "(", "provenance_format_args", ",", "'impact_function_format'", ")", "impact_function_provenance", "=", "{", "'header'", ":", "header", ",", "'provenance'", ":", 
"provenance_format", ".", "format", "(", "impact_function_name", "=", "impact_function_name", ",", "reference", "=", "default_reference", ")", "}", "provenance_detail", "=", "OrderedDict", "(", ")", "provenance_detail", "[", "'hazard'", "]", "=", "hazard_provenance", "provenance_detail", "[", "'exposure'", "]", "=", "exposure_provenance", "provenance_detail", "[", "'aggregation'", "]", "=", "aggregation_provenance", "provenance_detail", "[", "'impact_function'", "]", "=", "impact_function_provenance", "analysis_details_header", "=", "resolve_from_dictionary", "(", "extra_args", ",", "[", "'header'", ",", "'analysis_detail'", "]", ")", "context", "[", "'component_key'", "]", "=", "component_metadata", ".", "key", "context", ".", "update", "(", "{", "'header'", ":", "analysis_details_header", ",", "'details'", ":", "provenance_detail", "}", ")", "return", "context" ]
Extracting simplified version of provenance details of layers. This extractor will produce provenance details which will be displayed in the main report. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.0
[ "Extracting", "simplified", "version", "of", "provenance", "details", "of", "layers", "." ]
python
train
37.25
williamfzc/ConnectionTracer
ConnectionTracer/utils.py
https://github.com/williamfzc/ConnectionTracer/blob/190003e374d6903cb82d2d21a1378979dc419ed3/ConnectionTracer/utils.py#L36-L48
def decode_response(content: bytes) -> set: """ adb response text -> device set """ content = content[4:].decode(config.ENCODING) if '\t' not in content and '\n' not in content: return set() connected_devices = set() device_list = [i for i in content.split('\n') if i] for each_device in device_list: device_id, device_status = each_device.split('\t') if device_status == 'device': connected_devices.add(device_id) return connected_devices
[ "def", "decode_response", "(", "content", ":", "bytes", ")", "->", "set", ":", "content", "=", "content", "[", "4", ":", "]", ".", "decode", "(", "config", ".", "ENCODING", ")", "if", "'\\t'", "not", "in", "content", "and", "'\\n'", "not", "in", "content", ":", "return", "set", "(", ")", "connected_devices", "=", "set", "(", ")", "device_list", "=", "[", "i", "for", "i", "in", "content", ".", "split", "(", "'\\n'", ")", "if", "i", "]", "for", "each_device", "in", "device_list", ":", "device_id", ",", "device_status", "=", "each_device", ".", "split", "(", "'\\t'", ")", "if", "device_status", "==", "'device'", ":", "connected_devices", ".", "add", "(", "device_id", ")", "return", "connected_devices" ]
adb response text -> device set
[ "adb", "response", "text", "-", ">", "device", "set" ]
python
train
37.846154
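The 4-byte slice at the top reflects the adb host protocol, where every reply starts with a four-character hex length field; after that, only lines whose status column is exactly 'device' are kept, so offline or unauthorized entries drop out. A self-contained sketch with utf-8 standing in for config.ENCODING:

def decode_response(content: bytes) -> set:
    content = content[4:].decode("utf-8")  # drop the 4-char hex length header
    if "\t" not in content and "\n" not in content:
        return set()
    connected = set()
    for line in filter(None, content.split("\n")):
        device_id, status = line.split("\t")
        if status == "device":  # skip 'offline', 'unauthorized', ...
            connected.add(device_id)
    return connected

raw = b"002eemulator-5554\tdevice\n0123456789ABCDEF\toffline\n"
print(decode_response(raw))  # {'emulator-5554'}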
flo-compbio/genometools
genometools/gcloud/storage.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/storage.py#L64-L70
def get_files(client, bucket, prefix=''): """Lists files/objects on a bucket. TODO: docstring""" bucket = client.get_bucket(bucket) files = list(bucket.list_blobs(prefix=prefix)) return files
[ "def", "get_files", "(", "client", ",", "bucket", ",", "prefix", "=", "''", ")", ":", "bucket", "=", "client", ".", "get_bucket", "(", "bucket", ")", "files", "=", "list", "(", "bucket", ".", "list_blobs", "(", "prefix", "=", "prefix", ")", ")", "return", "files" ]
Lists files/objects on a bucket under an optional prefix.
[ "Lists", "files", "/", "objects", "on", "a", "bucket", ".", "TODO", ":", "docstring" ]
python
train
30.571429
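A minimal usage sketch; the bucket name and prefix are placeholders, and storage.Client() assumes application-default credentials are configured:

from google.cloud import storage

from genometools.gcloud.storage import get_files

client = storage.Client()
for blob in get_files(client, "my-bucket", prefix="expression/"):
    print(blob.name)  # each item is a google.cloud.storage Blob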
arubertoson/maya-launcher
mayalauncher.py
https://github.com/arubertoson/maya-launcher/blob/9bd82cce7edf4afb803dd8044107a324e93f197f/mayalauncher.py#L167-L179
def build_config(config_file=get_system_config_directory()): """ Construct the config object from necessary elements. """ config = Config(config_file, allow_no_value=True) application_versions = find_applications_on_system() # Add found versions to config if they don't exist. Versions found # in the config file takes precedence over versions found in PATH. for item in application_versions.iteritems(): if not config.has_option(Config.EXECUTABLES, item[0]): config.set(Config.EXECUTABLES, item[0], item[1]) return config
[ "def", "build_config", "(", "config_file", "=", "get_system_config_directory", "(", ")", ")", ":", "config", "=", "Config", "(", "config_file", ",", "allow_no_value", "=", "True", ")", "application_versions", "=", "find_applications_on_system", "(", ")", "# Add found versions to config if they don't exist. Versions found\r", "# in the config file takes precedence over versions found in PATH.\r", "for", "item", "in", "application_versions", ".", "iteritems", "(", ")", ":", "if", "not", "config", ".", "has_option", "(", "Config", ".", "EXECUTABLES", ",", "item", "[", "0", "]", ")", ":", "config", ".", "set", "(", "Config", ".", "EXECUTABLES", ",", "item", "[", "0", "]", ",", "item", "[", "1", "]", ")", "return", "config" ]
Construct the config object from necessary elements.
[ "Construct", "the", "config", "object", "from", "necessary", "elements", "." ]
python
train
44.538462
sdss/sdss_access
python/sdss_access/sync/rsync.py
https://github.com/sdss/sdss_access/blob/76375bbf37d39d2e4ccbed90bdfa9a4298784470/python/sdss_access/sync/rsync.py#L160-L167
def commit(self, offset=None, limit=None, dryrun=False): """ Start the rsync download """ self.stream.command = "rsync -avRK --files-from={path} {source} {destination}" self.stream.append_tasks_to_streamlets(offset=offset, limit=limit) self.stream.commit_streamlets() self.stream.run_streamlets() self.stream.reset_streamlet()
[ "def", "commit", "(", "self", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "dryrun", "=", "False", ")", ":", "self", ".", "stream", ".", "command", "=", "\"rsync -avRK --files-from={path} {source} {destination}\"", "self", ".", "stream", ".", "append_tasks_to_streamlets", "(", "offset", "=", "offset", ",", "limit", "=", "limit", ")", "self", ".", "stream", ".", "commit_streamlets", "(", ")", "self", ".", "stream", ".", "run_streamlets", "(", ")", "self", ".", "stream", ".", "reset_streamlet", "(", ")" ]
Start the rsync download
[ "Start", "the", "rsync", "download" ]
python
train
46
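The heart of commit is the command template: rsync -avRK plus --files-from, so each streamlet supplies its own path-list file while source and destination are shared. A sketch of how the placeholders render, with all three values hypothetical:

command = "rsync -avRK --files-from={path} {source} {destination}"
print(command.format(path="/tmp/stream_0.txt",
                     source="rsync://user@host/sas/",
                     destination="/local/mirror/"))
# rsync -avRK --files-from=/tmp/stream_0.txt rsync://user@host/sas/ /local/mirror/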
inspirehep/harvesting-kit
harvestingkit/bibrecord.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/bibrecord.py#L1722-L1725
def _record_sort_by_indicators(record): """Sort the fields inside the record by indicators.""" for tag, fields in record.items(): record[tag] = _fields_sort_by_indicators(fields)
[ "def", "_record_sort_by_indicators", "(", "record", ")", ":", "for", "tag", ",", "fields", "in", "record", ".", "items", "(", ")", ":", "record", "[", "tag", "]", "=", "_fields_sort_by_indicators", "(", "fields", ")" ]
Sort the fields inside the record by indicators.
[ "Sort", "the", "fields", "inside", "the", "record", "by", "indicators", "." ]
python
valid
47.75
mirukan/whratio
whratio/__main__.py
https://github.com/mirukan/whratio/blob/e19cf7346351649d196d2eb3369870841f7bfea5/whratio/__main__.py#L33-L68
def main(): "Process CLI arguments and call appropriate functions." try: args = docopt.docopt(__doc__, version=__about__.__version__) except docopt.DocoptExit: if len(sys.argv) > 1: print(f"{Fore.RED}Invalid command syntax, " f"check help:{Fore.RESET}\n") print(__doc__) sys.exit(1) print_all = False if not (args["--int-width"] or args["--int-height"] or args["--decimal"]): print_all = True width = float(args["WIDTH"]) height = float(args["HEIGHT"]) as_int_ = as_int(width, height) as_float_ = as_float(width, height) if args["--ndigits"]: as_float_ = round(as_float_, int(args["--ndigits"])) to_print = [] if args["--int-width"] or print_all: to_print.append(f"{Fore.BLUE}{as_int_[0]!s}") if args["--int-height"] or print_all: to_print.append(f"{Fore.BLUE}{as_int_[1]!s}") if args["--decimal"] or print_all: to_print.append(f"{Fore.MAGENTA}{as_float_!s}") print(" ".join(to_print))
[ "def", "main", "(", ")", ":", "try", ":", "args", "=", "docopt", ".", "docopt", "(", "__doc__", ",", "version", "=", "__about__", ".", "__version__", ")", "except", "docopt", ".", "DocoptExit", ":", "if", "len", "(", "sys", ".", "argv", ")", ">", "1", ":", "print", "(", "f\"{Fore.RED}Invalid command syntax, \"", "f\"check help:{Fore.RESET}\\n\"", ")", "print", "(", "__doc__", ")", "sys", ".", "exit", "(", "1", ")", "print_all", "=", "False", "if", "not", "(", "args", "[", "\"--int-width\"", "]", "or", "args", "[", "\"--int-height\"", "]", "or", "args", "[", "\"--decimal\"", "]", ")", ":", "print_all", "=", "True", "width", "=", "float", "(", "args", "[", "\"WIDTH\"", "]", ")", "height", "=", "float", "(", "args", "[", "\"HEIGHT\"", "]", ")", "as_int_", "=", "as_int", "(", "width", ",", "height", ")", "as_float_", "=", "as_float", "(", "width", ",", "height", ")", "if", "args", "[", "\"--ndigits\"", "]", ":", "as_float_", "=", "round", "(", "as_float_", ",", "int", "(", "args", "[", "\"--ndigits\"", "]", ")", ")", "to_print", "=", "[", "]", "if", "args", "[", "\"--int-width\"", "]", "or", "print_all", ":", "to_print", ".", "append", "(", "f\"{Fore.BLUE}{as_int_[0]!s}\"", ")", "if", "args", "[", "\"--int-height\"", "]", "or", "print_all", ":", "to_print", ".", "append", "(", "f\"{Fore.BLUE}{as_int_[1]!s}\"", ")", "if", "args", "[", "\"--decimal\"", "]", "or", "print_all", ":", "to_print", ".", "append", "(", "f\"{Fore.MAGENTA}{as_float_!s}\"", ")", "print", "(", "\" \"", ".", "join", "(", "to_print", ")", ")" ]
Process CLI arguments and call appropriate functions.
[ "Process", "CLI", "arguments", "and", "call", "appropriate", "functions", "." ]
python
train
28.555556
econ-ark/HARK
HARK/interpolation.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/interpolation.py#L3113-L3195
def findSector(self,x,y): ''' Finds the quadrilateral "sector" for each (x,y) point in the input. Only called as a subroutine of _evaluate(). Parameters ---------- x : np.array Values whose sector should be found. y : np.array Values whose sector should be found. Should be same size as x. Returns ------- x_pos : np.array Sector x-coordinates for each point of the input, of the same size. y_pos : np.array Sector y-coordinates for each point of the input, of the same size. ''' # Initialize the sector guess m = x.size x_pos_guess = (np.ones(m)*self.x_n/2).astype(int) y_pos_guess = (np.ones(m)*self.y_n/2).astype(int) # Define a function that checks whether a set of points violates a linear # boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2), # where the latter is *COUNTER CLOCKWISE* from the former. Returns # 1 if the point is outside the boundary and 0 otherwise. violationCheck = lambda x_check,y_check,x_bound_1,y_bound_1,x_bound_2,y_bound_2 : ( (y_bound_2 - y_bound_1)*x_check - (x_bound_2 - x_bound_1)*y_check > x_bound_1*y_bound_2 - y_bound_1*x_bound_2 ) + 0 # Identify the correct sector for each point to be evaluated these = np.ones(m,dtype=bool) max_loops = self.x_n + self.y_n loops = 0 while np.any(these) and loops < max_loops: # Get coordinates for the four vertices: (xA,yA),...,(xD,yD) x_temp = x[these] y_temp = y[these] xA = self.x_values[x_pos_guess[these],y_pos_guess[these]] xB = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]] xC = self.x_values[x_pos_guess[these],y_pos_guess[these]+1] xD = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]+1] yA = self.y_values[x_pos_guess[these],y_pos_guess[these]] yB = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]] yC = self.y_values[x_pos_guess[these],y_pos_guess[these]+1] yD = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]+1] # Check the "bounding box" for the sector: is this guess plausible? move_down = (y_temp < np.minimum(yA,yB)) + 0 move_right = (x_temp > np.maximum(xB,xD)) + 0 move_up = (y_temp > np.maximum(yC,yD)) + 0 move_left = (x_temp < np.minimum(xA,xC)) + 0 # Check which boundaries are violated (and thus where to look next) c = (move_down + move_right + move_up + move_left) == 0 move_down[c] = violationCheck(x_temp[c],y_temp[c],xA[c],yA[c],xB[c],yB[c]) move_right[c] = violationCheck(x_temp[c],y_temp[c],xB[c],yB[c],xD[c],yD[c]) move_up[c] = violationCheck(x_temp[c],y_temp[c],xD[c],yD[c],xC[c],yC[c]) move_left[c] = violationCheck(x_temp[c],y_temp[c],xC[c],yC[c],xA[c],yA[c]) # Update the sector guess based on the violations x_pos_next = x_pos_guess[these] - move_left + move_right x_pos_next[x_pos_next < 0] = 0 x_pos_next[x_pos_next > (self.x_n-2)] = self.x_n-2 y_pos_next = y_pos_guess[these] - move_down + move_up y_pos_next[y_pos_next < 0] = 0 y_pos_next[y_pos_next > (self.y_n-2)] = self.y_n-2 # Check which sectors have not changed, and mark them as complete no_move = np.array(np.logical_and(x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next)) x_pos_guess[these] = x_pos_next y_pos_guess[these] = y_pos_next temp = these.nonzero() these[temp[0][no_move]] = False # Move to the next iteration of the search loops += 1 # Return the output x_pos = x_pos_guess y_pos = y_pos_guess return x_pos, y_pos
[ "def", "findSector", "(", "self", ",", "x", ",", "y", ")", ":", "# Initialize the sector guess", "m", "=", "x", ".", "size", "x_pos_guess", "=", "(", "np", ".", "ones", "(", "m", ")", "*", "self", ".", "x_n", "/", "2", ")", ".", "astype", "(", "int", ")", "y_pos_guess", "=", "(", "np", ".", "ones", "(", "m", ")", "*", "self", ".", "y_n", "/", "2", ")", ".", "astype", "(", "int", ")", "# Define a function that checks whether a set of points violates a linear", "# boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),", "# where the latter is *COUNTER CLOCKWISE* from the former. Returns", "# 1 if the point is outside the boundary and 0 otherwise.", "violationCheck", "=", "lambda", "x_check", ",", "y_check", ",", "x_bound_1", ",", "y_bound_1", ",", "x_bound_2", ",", "y_bound_2", ":", "(", "(", "y_bound_2", "-", "y_bound_1", ")", "*", "x_check", "-", "(", "x_bound_2", "-", "x_bound_1", ")", "*", "y_check", ">", "x_bound_1", "*", "y_bound_2", "-", "y_bound_1", "*", "x_bound_2", ")", "+", "0", "# Identify the correct sector for each point to be evaluated", "these", "=", "np", ".", "ones", "(", "m", ",", "dtype", "=", "bool", ")", "max_loops", "=", "self", ".", "x_n", "+", "self", ".", "y_n", "loops", "=", "0", "while", "np", ".", "any", "(", "these", ")", "and", "loops", "<", "max_loops", ":", "# Get coordinates for the four vertices: (xA,yA),...,(xD,yD)", "x_temp", "=", "x", "[", "these", "]", "y_temp", "=", "y", "[", "these", "]", "xA", "=", "self", ".", "x_values", "[", "x_pos_guess", "[", "these", "]", ",", "y_pos_guess", "[", "these", "]", "]", "xB", "=", "self", ".", "x_values", "[", "x_pos_guess", "[", "these", "]", "+", "1", ",", "y_pos_guess", "[", "these", "]", "]", "xC", "=", "self", ".", "x_values", "[", "x_pos_guess", "[", "these", "]", ",", "y_pos_guess", "[", "these", "]", "+", "1", "]", "xD", "=", "self", ".", "x_values", "[", "x_pos_guess", "[", "these", "]", "+", "1", ",", "y_pos_guess", "[", "these", "]", "+", "1", "]", "yA", "=", "self", ".", "y_values", "[", "x_pos_guess", "[", "these", "]", ",", "y_pos_guess", "[", "these", "]", "]", "yB", "=", "self", ".", "y_values", "[", "x_pos_guess", "[", "these", "]", "+", "1", ",", "y_pos_guess", "[", "these", "]", "]", "yC", "=", "self", ".", "y_values", "[", "x_pos_guess", "[", "these", "]", ",", "y_pos_guess", "[", "these", "]", "+", "1", "]", "yD", "=", "self", ".", "y_values", "[", "x_pos_guess", "[", "these", "]", "+", "1", ",", "y_pos_guess", "[", "these", "]", "+", "1", "]", "# Check the \"bounding box\" for the sector: is this guess plausible?", "move_down", "=", "(", "y_temp", "<", "np", ".", "minimum", "(", "yA", ",", "yB", ")", ")", "+", "0", "move_right", "=", "(", "x_temp", ">", "np", ".", "maximum", "(", "xB", ",", "xD", ")", ")", "+", "0", "move_up", "=", "(", "y_temp", ">", "np", ".", "maximum", "(", "yC", ",", "yD", ")", ")", "+", "0", "move_left", "=", "(", "x_temp", "<", "np", ".", "minimum", "(", "xA", ",", "xC", ")", ")", "+", "0", "# Check which boundaries are violated (and thus where to look next)", "c", "=", "(", "move_down", "+", "move_right", "+", "move_up", "+", "move_left", ")", "==", "0", "move_down", "[", "c", "]", "=", "violationCheck", "(", "x_temp", "[", "c", "]", ",", "y_temp", "[", "c", "]", ",", "xA", "[", "c", "]", ",", "yA", "[", "c", "]", ",", "xB", "[", "c", "]", ",", "yB", "[", "c", "]", ")", "move_right", "[", "c", "]", "=", "violationCheck", "(", "x_temp", "[", "c", "]", ",", "y_temp", "[", "c", "]", ",", "xB", "[", "c", "]", ",", "yB", "[", "c", "]", ",", "xD", "[", "c", 
"]", ",", "yD", "[", "c", "]", ")", "move_up", "[", "c", "]", "=", "violationCheck", "(", "x_temp", "[", "c", "]", ",", "y_temp", "[", "c", "]", ",", "xD", "[", "c", "]", ",", "yD", "[", "c", "]", ",", "xC", "[", "c", "]", ",", "yC", "[", "c", "]", ")", "move_left", "[", "c", "]", "=", "violationCheck", "(", "x_temp", "[", "c", "]", ",", "y_temp", "[", "c", "]", ",", "xC", "[", "c", "]", ",", "yC", "[", "c", "]", ",", "xA", "[", "c", "]", ",", "yA", "[", "c", "]", ")", "# Update the sector guess based on the violations", "x_pos_next", "=", "x_pos_guess", "[", "these", "]", "-", "move_left", "+", "move_right", "x_pos_next", "[", "x_pos_next", "<", "0", "]", "=", "0", "x_pos_next", "[", "x_pos_next", ">", "(", "self", ".", "x_n", "-", "2", ")", "]", "=", "self", ".", "x_n", "-", "2", "y_pos_next", "=", "y_pos_guess", "[", "these", "]", "-", "move_down", "+", "move_up", "y_pos_next", "[", "y_pos_next", "<", "0", "]", "=", "0", "y_pos_next", "[", "y_pos_next", ">", "(", "self", ".", "y_n", "-", "2", ")", "]", "=", "self", ".", "y_n", "-", "2", "# Check which sectors have not changed, and mark them as complete", "no_move", "=", "np", ".", "array", "(", "np", ".", "logical_and", "(", "x_pos_guess", "[", "these", "]", "==", "x_pos_next", ",", "y_pos_guess", "[", "these", "]", "==", "y_pos_next", ")", ")", "x_pos_guess", "[", "these", "]", "=", "x_pos_next", "y_pos_guess", "[", "these", "]", "=", "y_pos_next", "temp", "=", "these", ".", "nonzero", "(", ")", "these", "[", "temp", "[", "0", "]", "[", "no_move", "]", "]", "=", "False", "# Move to the next iteration of the search", "loops", "+=", "1", "# Return the output", "x_pos", "=", "x_pos_guess", "y_pos", "=", "y_pos_guess", "return", "x_pos", ",", "y_pos" ]
Finds the quadrilateral "sector" for each (x,y) point in the input. Only called as a subroutine of _evaluate(). Parameters ---------- x : np.array Values whose sector should be found. y : np.array Values whose sector should be found. Should be same size as x. Returns ------- x_pos : np.array Sector x-coordinates for each point of the input, of the same size. y_pos : np.array Sector y-coordinates for each point of the input, of the same size.
[ "Finds", "the", "quadrilateral", "sector", "for", "each", "(", "x", "y", ")", "point", "in", "the", "input", ".", "Only", "called", "as", "a", "subroutine", "of", "_evaluate", "()", "." ]
python
train
47.39759
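The workhorse of findSector is the violationCheck lambda: a 2-D cross-product test for whether a point lies outside a directed edge whose second endpoint is counter-clockwise from the first. A minimal sketch of that test, with the algebra spelled out (names and sample points are illustrative, not from the source):

import numpy as np

def outside_ccw_edge(x, y, x1, y1, x2, y2):
    # Cross product (p2 - p1) x (p - p1) < 0 means p lies to the right of the
    # directed edge p1 -> p2, i.e. outside a counter-clockwise quadrilateral.
    # Rearranged, that is exactly the inequality violationCheck evaluates.
    return ((y2 - y1) * x - (x2 - x1) * y > x1 * y2 - y1 * x2) + 0

# Bottom edge A=(0,0) -> B=(1,0) of a CCW unit square: only points with y < 0
# violate it, which is what drives the move_down step of the sector search.
xs = np.array([0.5, 0.5])
ys = np.array([0.5, -0.5])
print(outside_ccw_edge(xs, ys, 0.0, 0.0, 1.0, 0.0))  # -> [0 1]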
robhowley/nhlscrapi
nhlscrapi/games/faceoffcomp.py
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/faceoffcomp.py#L129-L144
def fo_pct_by_zone(self): """ Get the by team face-off win % by zone. Format is :returns: dict ``{ 'home/away': { 'off/def/neut': % } }`` """ bz = self.by_zone return { t: { z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0 for z in self.__zones if z != 'all' } for t in [ 'home', 'away' ] }
[ "def", "fo_pct_by_zone", "(", "self", ")", ":", "bz", "=", "self", ".", "by_zone", "return", "{", "t", ":", "{", "z", ":", "bz", "[", "t", "]", "[", "z", "]", "[", "'won'", "]", "/", "(", "1.0", "*", "bz", "[", "t", "]", "[", "z", "]", "[", "'total'", "]", ")", "if", "bz", "[", "t", "]", "[", "z", "]", "[", "'total'", "]", "else", "0.0", "for", "z", "in", "self", ".", "__zones", "if", "z", "!=", "'all'", "}", "for", "t", "in", "[", "'home'", ",", "'away'", "]", "}" ]
Get the by team face-off win % by zone. Format is :returns: dict ``{ 'home/away': { 'off/def/neut': % } }``
[ "Get", "the", "by", "team", "face", "-", "off", "win", "%", "by", "zone", ".", "Format", "is", ":", "returns", ":", "dict", "{", "home", "/", "away", ":", "{", "off", "/", "def", "/", "neut", ":", "%", "}", "}" ]
python
train
29.0625
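A standalone sketch of the same reduction, assuming a by_zone dict shaped like the one this class accumulates (the counts below are invented):

by_zone = {
    'home': {'off': {'won': 6, 'total': 10}, 'def': {'won': 3, 'total': 8},
             'neut': {'won': 0, 'total': 0}},
    'away': {'off': {'won': 5, 'total': 8}, 'def': {'won': 4, 'total': 10},
             'neut': {'won': 2, 'total': 4}},
}

fo_pct = {
    t: {z: zc['won'] / (1.0 * zc['total']) if zc['total'] else 0.0
        for z, zc in by_zone[t].items()}   # guard avoids division by zero
    for t in ('home', 'away')
}
print(fo_pct['home']['off'])   # 0.6; zones with no draws report 0.0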
onespacemedia/cms-redirects
redirects/models.py
https://github.com/onespacemedia/cms-redirects/blob/9a412dbd4fdac016fbe0ac7bf6773868169cb148/redirects/models.py#L52-L57
def sub_path(self, path): """ If this redirect is a regular expression, it will return a rewritten version of `path`; otherwise returns the `new_path`. """ if not self.regular_expression: return self.new_path return re.sub(self.old_path, self.new_path, path)
[ "def", "sub_path", "(", "self", ",", "path", ")", ":", "if", "not", "self", ".", "regular_expression", ":", "return", "self", ".", "new_path", "return", "re", ".", "sub", "(", "self", ".", "old_path", ",", "self", ".", "new_path", ",", "path", ")" ]
If this redirect is a regular expression, it will return a rewritten version of `path`; otherwise returns the `new_path`.
[ "If", "this", "redirect", "is", "a", "regular", "expression", "it", "will", "return", "a", "rewritten", "version", "of", "path", ";", "otherwise", "returns", "the", "new_path", "." ]
python
train
49.5
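When regular_expression is set, old_path acts as the pattern and new_path as the replacement, so backreferences can carry path segments across the rewrite. A small sketch with made-up paths:

import re

old_path = r'^/blog/(\d{4})/(.+)$'    # hypothetical stored pattern
new_path = r'/articles/\2?year=\1'    # hypothetical replacement with backrefs

print(re.sub(old_path, new_path, '/blog/2016/hello-world'))
# -> /articles/hello-world?year=2016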
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1186-L1201
def check_terminate(self): """ Returns a Bool of whether to terminate. Checks whether a satisfactory minimum has been found or whether too many iterations have occurred. """ if not self._has_run: return False else: #1-3. errtol, paramtol, model cosine low enough? terminate = self.check_completion() #4. too many iterations?? terminate |= (self._num_iter >= self.max_iter) return terminate
[ "def", "check_terminate", "(", "self", ")", ":", "if", "not", "self", ".", "_has_run", ":", "return", "False", "else", ":", "#1-3. errtol, paramtol, model cosine low enough?", "terminate", "=", "self", ".", "check_completion", "(", ")", "#4. too many iterations??", "terminate", "|=", "(", "self", ".", "_num_iter", ">=", "self", ".", "max_iter", ")", "return", "terminate" ]
Returns a Bool of whether to terminate. Checks whether a satisfactory minimum has been found or whether too many iterations have occurred.
[ "Returns", "a", "Bool", "of", "whether", "to", "terminate", "." ]
python
valid
31.5
cloud9ers/gurumate
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py#L812-L819
def add(self,dist): """Add `dist` if we ``can_add()`` it and it isn't already added""" if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key,[]) if dist not in dists: dists.append(dist) if dist.key in self._cache: _sort_dists(self._cache[dist.key])
[ "def", "add", "(", "self", ",", "dist", ")", ":", "if", "self", ".", "can_add", "(", "dist", ")", "and", "dist", ".", "has_version", "(", ")", ":", "dists", "=", "self", ".", "_distmap", ".", "setdefault", "(", "dist", ".", "key", ",", "[", "]", ")", "if", "dist", "not", "in", "dists", ":", "dists", ".", "append", "(", "dist", ")", "if", "dist", ".", "key", "in", "self", ".", "_cache", ":", "_sort_dists", "(", "self", ".", "_cache", "[", "dist", ".", "key", "]", ")" ]
Add `dist` if we ``can_add()`` it and it isn't already added
[ "Add", "dist", "if", "we", "can_add", "()", "it", "and", "it", "isn", "t", "already", "added" ]
python
test
45.875
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/sandbox.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/sandbox.py#L70-L84
def override_temp(replacement): """ Monkey-patch tempfile.tempdir with replacement, ensuring it exists """ if not os.path.isdir(replacement): os.makedirs(replacement) saved = tempfile.tempdir tempfile.tempdir = replacement try: yield finally: tempfile.tempdir = saved
[ "def", "override_temp", "(", "replacement", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "replacement", ")", ":", "os", ".", "makedirs", "(", "replacement", ")", "saved", "=", "tempfile", ".", "tempdir", "tempfile", ".", "tempdir", "=", "replacement", "try", ":", "yield", "finally", ":", "tempfile", ".", "tempdir", "=", "saved" ]
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
[ "Monkey", "-", "patch", "tempfile", ".", "tempdir", "with", "replacement", "ensuring", "it", "exists" ]
python
test
20.8
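The snippet reads like a generator-based context manager; presumably the @contextlib.contextmanager decorator sits just above the captured span in sandbox.py. The pattern, reproduced standalone:

import contextlib
import os
import tempfile

@contextlib.contextmanager
def override_temp(replacement):
    """Monkey-patch tempfile.tempdir with replacement, restoring it on exit."""
    if not os.path.isdir(replacement):
        os.makedirs(replacement)
    saved = tempfile.tempdir
    tempfile.tempdir = replacement
    try:
        yield
    finally:
        tempfile.tempdir = saved

with override_temp('./tmp_sandbox'):
    fd, path = tempfile.mkstemp()   # lands under ./tmp_sandbox
    os.close(fd)
    print(path)
print(tempfile.tempdir)             # restored (None by default)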
echonest/pyechonest
pyechonest/song.py
https://github.com/echonest/pyechonest/blob/d8c7af6c1da699b50b2f4b1bd3c0febe72e7f1ee/pyechonest/song.py#L323-L356
def get_tracks(self, catalog, cache=True): """Get the tracks for a song given a catalog. Args: catalog (str): a string representing the catalog whose track you want to retrieve. Returns: A list of Track dicts. Example: >>> s = song.Song('SOWDASQ12A6310F24F') >>> s.get_tracks('7digital')[0] {u'catalog': u'7digital', u'foreign_id': u'7digital:track:8445818', u'id': u'TRJGNNY12903CC625C', u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3', u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'} >>> """ if not (cache and ('tracks' in self.cache) and (catalog in [td['catalog'] for td in self.cache['tracks']])): kwargs = { 'bucket':['tracks', 'id:%s' % catalog], } response = self.get_attribute('profile', **kwargs) if not 'tracks' in self.cache: self.cache['tracks'] = [] # don't blow away the cache for other catalogs potential_tracks = response['songs'][0].get('tracks', []) existing_track_ids = [tr['foreign_id'] for tr in self.cache['tracks']] new_tds = filter(lambda tr: tr['foreign_id'] not in existing_track_ids, potential_tracks) self.cache['tracks'].extend(new_tds) return filter(lambda tr: tr['catalog']==util.map_idspace(catalog), self.cache['tracks'])
[ "def", "get_tracks", "(", "self", ",", "catalog", ",", "cache", "=", "True", ")", ":", "if", "not", "(", "cache", "and", "(", "'tracks'", "in", "self", ".", "cache", ")", "and", "(", "catalog", "in", "[", "td", "[", "'catalog'", "]", "for", "td", "in", "self", ".", "cache", "[", "'tracks'", "]", "]", ")", ")", ":", "kwargs", "=", "{", "'bucket'", ":", "[", "'tracks'", ",", "'id:%s'", "%", "catalog", "]", ",", "}", "response", "=", "self", ".", "get_attribute", "(", "'profile'", ",", "*", "*", "kwargs", ")", "if", "not", "'tracks'", "in", "self", ".", "cache", ":", "self", ".", "cache", "[", "'tracks'", "]", "=", "[", "]", "# don't blow away the cache for other catalogs", "potential_tracks", "=", "response", "[", "'songs'", "]", "[", "0", "]", ".", "get", "(", "'tracks'", ",", "[", "]", ")", "existing_track_ids", "=", "[", "tr", "[", "'foreign_id'", "]", "for", "tr", "in", "self", ".", "cache", "[", "'tracks'", "]", "]", "new_tds", "=", "filter", "(", "lambda", "tr", ":", "tr", "[", "'foreign_id'", "]", "not", "in", "existing_track_ids", ",", "potential_tracks", ")", "self", ".", "cache", "[", "'tracks'", "]", ".", "extend", "(", "new_tds", ")", "return", "filter", "(", "lambda", "tr", ":", "tr", "[", "'catalog'", "]", "==", "util", ".", "map_idspace", "(", "catalog", ")", ",", "self", ".", "cache", "[", "'tracks'", "]", ")" ]
Get the tracks for a song given a catalog. Args: catalog (str): a string representing the catalog whose track you want to retrieve. Returns: A list of Track dicts. Example: >>> s = song.Song('SOWDASQ12A6310F24F') >>> s.get_tracks('7digital')[0] {u'catalog': u'7digital', u'foreign_id': u'7digital:track:8445818', u'id': u'TRJGNNY12903CC625C', u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3', u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'} >>>
[ "Get", "the", "tracks", "for", "a", "song", "given", "a", "catalog", ".", "Args", ":", "catalog", "(", "str", ")", ":", "a", "string", "representing", "the", "catalog", "whose", "track", "you", "want", "to", "retrieve", ".", "Returns", ":", "A", "list", "of", "Track", "dicts", ".", "Example", ":", ">>>", "s", "=", "song", ".", "Song", "(", "SOWDASQ12A6310F24F", ")", ">>>", "s", ".", "get_tracks", "(", "7digital", ")", "[", "0", "]", "{", "u", "catalog", ":", "u", "7digital", "u", "foreign_id", ":", "u", "7digital", ":", "track", ":", "8445818", "u", "id", ":", "u", "TRJGNNY12903CC625C", "u", "preview_url", ":", "u", "http", ":", "//", "previews", ".", "7digital", ".", "com", "/", "clips", "/", "34", "/", "8445818", ".", "clip", ".", "mp3", "u", "release_image", ":", "u", "http", ":", "//", "cdn", ".", "7static", ".", "com", "/", "static", "/", "img", "/", "sleeveart", "/", "00", "/", "007", "/", "628", "/", "0000762838_200", ".", "jpg", "}", ">>>" ]
python
train
46.176471
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L137-L149
def collect(self, name, arr): """Callback function for collecting layer output NDArrays.""" name = py_str(name) if self.include_layer is not None and not self.include_layer(name): return handle = ctypes.cast(arr, NDArrayHandle) arr = NDArray(handle, writable=False).copyto(cpu()) if self.logger is not None: self.logger.info("Collecting layer %s output of shape %s" % (name, arr.shape)) if name in self.nd_dict: self.nd_dict[name].append(arr) else: self.nd_dict[name] = [arr]
[ "def", "collect", "(", "self", ",", "name", ",", "arr", ")", ":", "name", "=", "py_str", "(", "name", ")", "if", "self", ".", "include_layer", "is", "not", "None", "and", "not", "self", ".", "include_layer", "(", "name", ")", ":", "return", "handle", "=", "ctypes", ".", "cast", "(", "arr", ",", "NDArrayHandle", ")", "arr", "=", "NDArray", "(", "handle", ",", "writable", "=", "False", ")", ".", "copyto", "(", "cpu", "(", ")", ")", "if", "self", ".", "logger", "is", "not", "None", ":", "self", ".", "logger", ".", "info", "(", "\"Collecting layer %s output of shape %s\"", "%", "(", "name", ",", "arr", ".", "shape", ")", ")", "if", "name", "in", "self", ".", "nd_dict", ":", "self", ".", "nd_dict", "[", "name", "]", ".", "append", "(", "arr", ")", "else", ":", "self", ".", "nd_dict", "[", "name", "]", "=", "[", "arr", "]" ]
Callback function for collecting layer output NDArrays.
[ "Callback", "function", "for", "collecting", "layer", "output", "NDArrays", "." ]
python
train
44.230769
tanghaibao/jcvi
jcvi/variation/cnv.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L922-L975
def mergecn(args): """ %prog mergecn FACE.csv Compile matrix of GC-corrected copy numbers. Place a bunch of folders in csv file. Each folder will be scanned, one chromosomes after another. """ p = OptionParser(mergecn.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) csvfile, = args samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)] betadir = "beta" mkdir(betadir) for seqid in allsomes: names = [op.join(s + "-cn", "{}.{}.cn". format(op.basename(s), seqid)) for s in samples] arrays = [np.fromfile(name, dtype=np.float) for name in names] shapes = [x.shape[0] for x in arrays] med_shape = np.median(shapes) arrays = [x for x in arrays if x.shape[0] == med_shape] ploidy = 2 if seqid not in ("chrY", "chrM") else 1 if seqid in sexsomes: chr_med = [np.median([x for x in a if x > 0]) for a in arrays] chr_med = np.array(chr_med) idx = get_kmeans(chr_med, k=2) zero_med = np.median(chr_med[idx == 0]) one_med = np.median(chr_med[idx == 1]) logging.debug("K-means with {} c0:{} c1:{}" .format(seqid, zero_med, one_med)) higher_idx = 1 if one_med > zero_med else 0 # Use the higher mean coverage componen arrays = np.array(arrays)[idx == higher_idx] arrays = [[x] for x in arrays] ar = np.concatenate(arrays) print(seqid, ar.shape) rows, columns = ar.shape beta = [] std = [] for j in xrange(columns): a = ar[:, j] beta.append(np.median(a)) std.append(np.std(a) / np.mean(a)) beta = np.array(beta) / ploidy betafile = op.join(betadir, "{}.beta".format(seqid)) beta.tofile(betafile) stdfile = op.join(betadir, "{}.std".format(seqid)) std = np.array(std) std.tofile(stdfile) logging.debug("Written to `{}`".format(betafile)) ar.tofile("{}.bin".format(seqid))
[ "def", "mergecn", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "mergecn", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "csvfile", ",", "=", "args", "samples", "=", "[", "x", ".", "replace", "(", "\"-cn\"", ",", "\"\"", ")", ".", "strip", "(", ")", ".", "strip", "(", "\"/\"", ")", "for", "x", "in", "open", "(", "csvfile", ")", "]", "betadir", "=", "\"beta\"", "mkdir", "(", "betadir", ")", "for", "seqid", "in", "allsomes", ":", "names", "=", "[", "op", ".", "join", "(", "s", "+", "\"-cn\"", ",", "\"{}.{}.cn\"", ".", "format", "(", "op", ".", "basename", "(", "s", ")", ",", "seqid", ")", ")", "for", "s", "in", "samples", "]", "arrays", "=", "[", "np", ".", "fromfile", "(", "name", ",", "dtype", "=", "np", ".", "float", ")", "for", "name", "in", "names", "]", "shapes", "=", "[", "x", ".", "shape", "[", "0", "]", "for", "x", "in", "arrays", "]", "med_shape", "=", "np", ".", "median", "(", "shapes", ")", "arrays", "=", "[", "x", "for", "x", "in", "arrays", "if", "x", ".", "shape", "[", "0", "]", "==", "med_shape", "]", "ploidy", "=", "2", "if", "seqid", "not", "in", "(", "\"chrY\"", ",", "\"chrM\"", ")", "else", "1", "if", "seqid", "in", "sexsomes", ":", "chr_med", "=", "[", "np", ".", "median", "(", "[", "x", "for", "x", "in", "a", "if", "x", ">", "0", "]", ")", "for", "a", "in", "arrays", "]", "chr_med", "=", "np", ".", "array", "(", "chr_med", ")", "idx", "=", "get_kmeans", "(", "chr_med", ",", "k", "=", "2", ")", "zero_med", "=", "np", ".", "median", "(", "chr_med", "[", "idx", "==", "0", "]", ")", "one_med", "=", "np", ".", "median", "(", "chr_med", "[", "idx", "==", "1", "]", ")", "logging", ".", "debug", "(", "\"K-means with {} c0:{} c1:{}\"", ".", "format", "(", "seqid", ",", "zero_med", ",", "one_med", ")", ")", "higher_idx", "=", "1", "if", "one_med", ">", "zero_med", "else", "0", "# Use the higher mean coverage componen", "arrays", "=", "np", ".", "array", "(", "arrays", ")", "[", "idx", "==", "higher_idx", "]", "arrays", "=", "[", "[", "x", "]", "for", "x", "in", "arrays", "]", "ar", "=", "np", ".", "concatenate", "(", "arrays", ")", "print", "(", "seqid", ",", "ar", ".", "shape", ")", "rows", ",", "columns", "=", "ar", ".", "shape", "beta", "=", "[", "]", "std", "=", "[", "]", "for", "j", "in", "xrange", "(", "columns", ")", ":", "a", "=", "ar", "[", ":", ",", "j", "]", "beta", ".", "append", "(", "np", ".", "median", "(", "a", ")", ")", "std", ".", "append", "(", "np", ".", "std", "(", "a", ")", "/", "np", ".", "mean", "(", "a", ")", ")", "beta", "=", "np", ".", "array", "(", "beta", ")", "/", "ploidy", "betafile", "=", "op", ".", "join", "(", "betadir", ",", "\"{}.beta\"", ".", "format", "(", "seqid", ")", ")", "beta", ".", "tofile", "(", "betafile", ")", "stdfile", "=", "op", ".", "join", "(", "betadir", ",", "\"{}.std\"", ".", "format", "(", "seqid", ")", ")", "std", "=", "np", ".", "array", "(", "std", ")", "std", ".", "tofile", "(", "stdfile", ")", "logging", ".", "debug", "(", "\"Written to `{}`\"", ".", "format", "(", "betafile", ")", ")", "ar", ".", "tofile", "(", "\"{}.bin\"", ".", "format", "(", "seqid", ")", ")" ]
%prog mergecn FACE.csv Compile matrix of GC-corrected copy numbers. Place a bunch of folders in a csv file. Each folder will be scanned, one chromosome after another.
[ "%prog", "mergecn", "FACE", ".", "csv" ]
python
train
38.703704
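The per-position reduction at the end of mergecn is a column-wise median (the 'beta' baseline, divided by ploidy) plus a coefficient of variation across samples. The same arithmetic, vectorized over fabricated coverage values:

import numpy as np

ploidy = 2
ar = np.array([[2.0, 4.0, 6.0],     # rows: samples, columns: genome bins
               [2.2, 3.8, 5.0],
               [1.8, 4.2, 7.0]])

beta = np.median(ar, axis=0) / ploidy           # per-bin copy-number baseline
cv = np.std(ar, axis=0) / np.mean(ar, axis=0)   # per-bin std, normalized by mean
print(beta)            # [1. 2. 3.]
print(cv.round(3))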
pip-services3-python/pip-services3-commons-python
pip_services3_commons/commands/CommandSet.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/commands/CommandSet.py#L152-L159
def add_event(self, event): """ Adds an IEvent event to this command set. :param event: an event instance to be added """ self._events.append(event) self._events_by_name[event.get_name()] = event
[ "def", "add_event", "(", "self", ",", "event", ")", ":", "self", ".", "_events", ".", "append", "(", "event", ")", "self", ".", "_events_by_name", "[", "event", ".", "get_name", "]", "=", "event" ]
Adds an IEvent event to this command set. :param event: an event instance to be added
[ "Adds", "an", "IEvent", "event", "to", "this", "command", "set", ".", ":", "param", "event", ":", "an", "event", "instance", "to", "be", "added" ]
python
train
30.375
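Assuming get_name() is an accessor method, as it is elsewhere in pip-services, registration keeps both an ordered list and a by-name index. A toy round trip:

class Event:
    """Stand-in for pip-services' IEvent; only the name accessor matters here."""
    def __init__(self, name):
        self._name = name
    def get_name(self):
        return self._name

class CommandSet:
    def __init__(self):
        self._events = []
        self._events_by_name = {}
    def add_event(self, event):
        self._events.append(event)
        self._events_by_name[event.get_name()] = event   # index by name string
    def find_event(self, name):
        return self._events_by_name.get(name)

cs = CommandSet()
cs.add_event(Event('updated'))
print(cs.find_event('updated').get_name())   # -> updated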
boriel/zxbasic
api/symboltable.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/api/symboltable.py#L762-L804
def declare_func(self, id_, lineno, type_=None): """ Declares a function in the current scope. Checks whether the id exist or not (error if exists). And creates the entry at the symbol table. """ if not self.check_class(id_, 'function', lineno): entry = self.get_entry(id_) # Must not exist or have _class = None or Function and declared = False an = 'an' if entry.class_.lower()[0] in 'aeio' else 'a' syntax_error(lineno, "'%s' already declared as %s %s at %i" % (id_, an, entry.class_, entry.lineno)) return None entry = self.get_entry(id_) # Must not exist or have _class = None or Function and declared = False if entry is not None: if entry.declared and not entry.forwarded: syntax_error(lineno, "Duplicate function name '%s', previously defined at %i" % (id_, entry.lineno)) return None if entry.class_ != CLASS.unknown and entry.callable is False: # HINT: Must use is False here. syntax_error_not_array_nor_func(lineno, id_) return None if id_[-1] in DEPRECATED_SUFFIXES and entry.type_ != self.basic_types[SUFFIX_TYPE[id_[-1]]]: syntax_error_func_type_mismatch(lineno, entry) if entry.token == 'VAR': # This was a function used in advance symbols.VAR.to_function(entry, lineno=lineno) entry.mangled = '%s_%s' % (self.mangle, entry.name) # HINT: mangle for nexted scopes else: entry = self.declare(id_, lineno, symbols.FUNCTION(id_, lineno, type_=type_)) if entry.forwarded: entry.forwared = False # No longer forwarded old_type = entry.type_ # Remembers the old type if entry.type_ is not None: if entry.type_ != old_type: syntax_error_func_type_mismatch(lineno, entry) else: entry.type_ = old_type else: entry.params_size = 0 # Size of parameters entry.locals_size = 0 # Size of local variables return entry
[ "def", "declare_func", "(", "self", ",", "id_", ",", "lineno", ",", "type_", "=", "None", ")", ":", "if", "not", "self", ".", "check_class", "(", "id_", ",", "'function'", ",", "lineno", ")", ":", "entry", "=", "self", ".", "get_entry", "(", "id_", ")", "# Must not exist or have _class = None or Function and declared = False", "an", "=", "'an'", "if", "entry", ".", "class_", ".", "lower", "(", ")", "[", "0", "]", "in", "'aeio'", "else", "'a'", "syntax_error", "(", "lineno", ",", "\"'%s' already declared as %s %s at %i\"", "%", "(", "id_", ",", "an", ",", "entry", ".", "class_", ",", "entry", ".", "lineno", ")", ")", "return", "None", "entry", "=", "self", ".", "get_entry", "(", "id_", ")", "# Must not exist or have _class = None or Function and declared = False", "if", "entry", "is", "not", "None", ":", "if", "entry", ".", "declared", "and", "not", "entry", ".", "forwarded", ":", "syntax_error", "(", "lineno", ",", "\"Duplicate function name '%s', previously defined at %i\"", "%", "(", "id_", ",", "entry", ".", "lineno", ")", ")", "return", "None", "if", "entry", ".", "class_", "!=", "CLASS", ".", "unknown", "and", "entry", ".", "callable", "is", "False", ":", "# HINT: Must use is False here.", "syntax_error_not_array_nor_func", "(", "lineno", ",", "id_", ")", "return", "None", "if", "id_", "[", "-", "1", "]", "in", "DEPRECATED_SUFFIXES", "and", "entry", ".", "type_", "!=", "self", ".", "basic_types", "[", "SUFFIX_TYPE", "[", "id_", "[", "-", "1", "]", "]", "]", ":", "syntax_error_func_type_mismatch", "(", "lineno", ",", "entry", ")", "if", "entry", ".", "token", "==", "'VAR'", ":", "# This was a function used in advance", "symbols", ".", "VAR", ".", "to_function", "(", "entry", ",", "lineno", "=", "lineno", ")", "entry", ".", "mangled", "=", "'%s_%s'", "%", "(", "self", ".", "mangle", ",", "entry", ".", "name", ")", "# HINT: mangle for nexted scopes", "else", ":", "entry", "=", "self", ".", "declare", "(", "id_", ",", "lineno", ",", "symbols", ".", "FUNCTION", "(", "id_", ",", "lineno", ",", "type_", "=", "type_", ")", ")", "if", "entry", ".", "forwarded", ":", "entry", ".", "forwared", "=", "False", "# No longer forwarded", "old_type", "=", "entry", ".", "type_", "# Remembers the old type", "if", "entry", ".", "type_", "is", "not", "None", ":", "if", "entry", ".", "type_", "!=", "old_type", ":", "syntax_error_func_type_mismatch", "(", "lineno", ",", "entry", ")", "else", ":", "entry", ".", "type_", "=", "old_type", "else", ":", "entry", ".", "params_size", "=", "0", "# Size of parameters", "entry", ".", "locals_size", "=", "0", "# Size of local variables", "return", "entry" ]
Declares a function in the current scope. Checks whether the id exists or not (error if it exists), and creates the entry in the symbol table.
[ "Declares", "a", "function", "in", "the", "current", "scope", ".", "Checks", "whether", "the", "id", "exist", "or", "not", "(", "error", "if", "exists", ")", ".", "And", "creates", "the", "entry", "at", "the", "symbol", "table", "." ]
python
train
49.139535
Miserlou/django-easy-timezones
easy_timezones/views.py
https://github.com/Miserlou/django-easy-timezones/blob/a25c6312a7ecb3ebfac7b2c458b1c5be5d45a239/easy_timezones/views.py#L8-L18
def with_tz(request): """ Get the time with TZ enabled """ dt = datetime.now() t = Template('{% load tz %}{% localtime on %}{% get_current_timezone as TIME_ZONE %}{{ TIME_ZONE }}{% endlocaltime %}') c = RequestContext(request) response = t.render(c) return HttpResponse(response)
[ "def", "with_tz", "(", "request", ")", ":", "dt", "=", "datetime", ".", "now", "(", ")", "t", "=", "Template", "(", "'{% load tz %}{% localtime on %}{% get_current_timezone as TIME_ZONE %}{{ TIME_ZONE }}{% endlocaltime %}'", ")", "c", "=", "RequestContext", "(", "request", ")", "response", "=", "t", ".", "render", "(", "c", ")", "return", "HttpResponse", "(", "response", ")" ]
Get the time with TZ enabled
[ "Get", "the", "time", "with", "TZ", "enabled" ]
python
valid
28.090909
striglia/stockfighter
stockfighter/stockfighter.py
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L60-L80
def place_new_order(self, stock, price, qty, direction, order_type): """Place an order for a stock. https://starfighter.readme.io/docs/place-new-order """ url_fragment = 'venues/{venue}/stocks/{stock}/orders'.format( venue=self.venue, stock=stock, ) data = { "stock": stock, "price": price, "venue": self.venue, "account": self.account, "qty": qty, "direction": direction, "orderType": order_type, } url = urljoin(self.base_url, url_fragment) resp = self.session.post(url, json=data) return resp.json()
[ "def", "place_new_order", "(", "self", ",", "stock", ",", "price", ",", "qty", ",", "direction", ",", "order_type", ")", ":", "url_fragment", "=", "'venues/{venue}/stocks/{stock}/orders'", ".", "format", "(", "venue", "=", "self", ".", "venue", ",", "stock", "=", "stock", ",", ")", "data", "=", "{", "\"stock\"", ":", "stock", ",", "\"price\"", ":", "price", ",", "\"venue\"", ":", "self", ".", "venue", ",", "\"account\"", ":", "self", ".", "account", ",", "\"qty\"", ":", "qty", ",", "\"direction\"", ":", "direction", ",", "\"orderType\"", ":", "order_type", ",", "}", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url_fragment", ")", "resp", "=", "self", ".", "session", ".", "post", "(", "url", ",", "json", "=", "data", ")", "return", "resp", ".", "json", "(", ")" ]
Place an order for a stock. https://starfighter.readme.io/docs/place-new-order
[ "Place", "an", "order", "for", "a", "stock", "." ]
python
train
31.285714
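The request body mirrors Stockfighter's order schema and urljoin stitches the venue/stock fragment onto the base URL. A dry sketch that only builds the pieces — no network call; the account, price, and quantity are invented:

from urllib.parse import urljoin

base_url = 'https://api.stockfighter.io/ob/api/'
venue, stock = 'TESTEX', 'FOOBAR'

url = urljoin(base_url, 'venues/{venue}/stocks/{stock}/orders'.format(
    venue=venue, stock=stock))
data = {
    'stock': stock, 'price': 5100, 'venue': venue,
    'account': 'EXB123456', 'qty': 100,
    'direction': 'buy', 'orderType': 'limit',
}
print(url)  # https://api.stockfighter.io/ob/api/venues/TESTEX/stocks/FOOBAR/orders
# session.post(url, json=data) against a live venue would return the order JSON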
NearHuscarl/py-currency
currency/currency.py
https://github.com/NearHuscarl/py-currency/blob/4e30426399872fd6bfaa4c752a91d67c2d7bf52c/currency/currency.py#L110-L131
def pretty(price, currency, *, abbrev=True, trim=True): """ return format price with symbol. Example format(100, 'USD') return '$100' pretty(price, currency, abbrev=True, trim=False) abbrev: True: print value + symbol. Symbol can either be placed before or after value False: print value + currency code. currency code is placed behind value trim: True: trim float value to the maximum digit numbers of that currency False: keep number of decimal in initial argument """ currency = validate_currency(currency) price = validate_price(price) space = '' if nospace(currency) else ' ' fmtstr = '' if trim: fmtstr = '{:0,.{x}f}'.format(price, x=decimals(currency)).rstrip('0').rstrip('.') else: fmtstr = '{:0,}'.format(price).rstrip('0').rstrip('.') if abbrev: # use currency symbol if issuffix(currency): return fmtstr + space + symbol(currency) return symbol(currency, native=False) + space + fmtstr return fmtstr + ' ' + code(currency)
[ "def", "pretty", "(", "price", ",", "currency", ",", "*", ",", "abbrev", "=", "True", ",", "trim", "=", "True", ")", ":", "currency", "=", "validate_currency", "(", "currency", ")", "price", "=", "validate_price", "(", "price", ")", "space", "=", "''", "if", "nospace", "(", "currency", ")", "else", "' '", "fmtstr", "=", "''", "if", "trim", ":", "fmtstr", "=", "'{:0,.{x}f}'", ".", "format", "(", "price", ",", "x", "=", "decimals", "(", "currency", ")", ")", ".", "rstrip", "(", "'0'", ")", ".", "rstrip", "(", "'.'", ")", "else", ":", "fmtstr", "=", "'{:0,}'", ".", "format", "(", "price", ")", ".", "rstrip", "(", "'0'", ")", ".", "rstrip", "(", "'.'", ")", "if", "abbrev", ":", "# use currency symbol", "if", "issuffix", "(", "currency", ")", ":", "return", "fmtstr", "+", "space", "+", "symbol", "(", "currency", ")", "return", "symbol", "(", "currency", ",", "native", "=", "False", ")", "+", "space", "+", "fmtstr", "return", "fmtstr", "+", "' '", "+", "code", "(", "currency", ")" ]
Return the formatted price with a symbol. Example: format(100, 'USD') returns '$100' pretty(price, currency, abbrev=True, trim=False) abbrev: True: print value + symbol. The symbol can be placed either before or after the value. False: print value + currency code. The currency code is placed behind the value. trim: True: trim the float value to the maximum digit count for that currency. False: keep the number of decimals from the initial argument.
[ "return", "format", "price", "with", "symbol", ".", "Example", "format", "(", "100", "USD", ")", "return", "$100", "pretty", "(", "price", "currency", "abbrev", "=", "True", "trim", "=", "False", ")", "abbrev", ":", "True", ":", "print", "value", "+", "symbol", ".", "Symbol", "can", "either", "be", "placed", "before", "or", "after", "value", "False", ":", "print", "value", "+", "currency", "code", ".", "currency", "code", "is", "placed", "behind", "value", "trim", ":", "True", ":", "trim", "float", "value", "to", "the", "maximum", "digit", "numbers", "of", "that", "currency", "False", ":", "keep", "number", "of", "decimal", "in", "initial", "argument" ]
python
train
42.863636
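The trim branch formats to the currency's decimal count and then drops trailing zeros and a dangling dot. Note the else branch applies the same rstrip to the untrimmed value, which mangles integers ending in zeros (e.g. 100 becomes '1'). The trim pattern in isolation, with an invented decimals value:

def trim_price(price, decimals):
    """Format with fixed decimals, then strip trailing zeros and a bare dot."""
    s = '{:0,.{x}f}'.format(price, x=decimals)
    return s.rstrip('0').rstrip('.')

print(trim_price(1234.5000, 2))   # '1,234.5'
print(trim_price(1000, 2))        # '1,000' (the dot goes with the zeros)
print(trim_price(0.999, 0))       # '1'     (rounds first, then trims)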
agoragames/haigha
haigha/connections/rabbit_connection.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L206-L217
def _recv_ack(self, method_frame): '''Receive an ack from the broker.''' if self._ack_listener: delivery_tag = method_frame.args.read_longlong() multiple = method_frame.args.read_bit() if multiple: while self._last_ack_id < delivery_tag: self._last_ack_id += 1 self._ack_listener(self._last_ack_id) else: self._last_ack_id = delivery_tag self._ack_listener(self._last_ack_id)
[ "def", "_recv_ack", "(", "self", ",", "method_frame", ")", ":", "if", "self", ".", "_ack_listener", ":", "delivery_tag", "=", "method_frame", ".", "args", ".", "read_longlong", "(", ")", "multiple", "=", "method_frame", ".", "args", ".", "read_bit", "(", ")", "if", "multiple", ":", "while", "self", ".", "_last_ack_id", "<", "delivery_tag", ":", "self", ".", "_last_ack_id", "+=", "1", "self", ".", "_ack_listener", "(", "self", ".", "_last_ack_id", ")", "else", ":", "self", ".", "_last_ack_id", "=", "delivery_tag", "self", ".", "_ack_listener", "(", "self", ".", "_last_ack_id", ")" ]
Receive an ack from the broker.
[ "Receive", "an", "ack", "from", "the", "broker", "." ]
python
train
43
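With multiple set, AMQP acks every outstanding tag up to delivery_tag, so the handler replays the listener once per tag. The fan-out logic in isolation, no broker required:

acked = []

class AckTracker:
    """Mimics the cumulative-ack bookkeeping of _recv_ack."""
    def __init__(self, listener):
        self._last_ack_id = 0
        self._ack_listener = listener

    def recv_ack(self, delivery_tag, multiple):
        if multiple:
            while self._last_ack_id < delivery_tag:
                self._last_ack_id += 1
                self._ack_listener(self._last_ack_id)
        else:
            self._last_ack_id = delivery_tag
            self._ack_listener(self._last_ack_id)

t = AckTracker(acked.append)
t.recv_ack(3, multiple=True)    # acks 1, 2, 3
t.recv_ack(5, multiple=False)   # acks only 5
print(acked)                    # [1, 2, 3, 5]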
psd-tools/psd-tools
src/psd_tools/api/mask.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/mask.py#L32-L34
def bbox(self): """BBox""" return self.left, self.top, self.right, self.bottom
[ "def", "bbox", "(", "self", ")", ":", "return", "self", ".", "left", ",", "self", ".", "top", ",", "self", ".", "right", ",", "self", ".", "bottom" ]
BBox
[ "BBox" ]
python
train
30.666667
klahnakoski/pyLibrary
mo_dots/__init__.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_dots/__init__.py#L30-L38
def inverse(d): """ reverse the k:v pairs """ output = {} for k, v in unwrap(d).items(): output[v] = output.get(v, []) output[v].append(k) return output
[ "def", "inverse", "(", "d", ")", ":", "output", "=", "{", "}", "for", "k", ",", "v", "in", "unwrap", "(", "d", ")", ".", "items", "(", ")", ":", "output", "[", "v", "]", "=", "output", ".", "get", "(", "v", ",", "[", "]", ")", "output", "[", "v", "]", ".", "append", "(", "k", ")", "return", "output" ]
reverse the k:v pairs
[ "reverse", "the", "k", ":", "v", "pairs" ]
python
train
20.444444
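inverse groups keys by value, so duplicate values collect all of their keys. The same reversal for a plain dict, without mo_dots' unwrap step:

def inverse_plain(d):
    # setdefault plays the role of output.get(v, []) + append above
    output = {}
    for k, v in d.items():
        output.setdefault(v, []).append(k)
    return output

print(inverse_plain({'a': 1, 'b': 2, 'c': 1}))   # {1: ['a', 'c'], 2: ['b']}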
shmir/PyTrafficGenerator
trafficgenerator/tgn_object.py
https://github.com/shmir/PyTrafficGenerator/blob/382e5d549c83404af2a6571fe19c9e71df8bac14/trafficgenerator/tgn_object.py#L249-L252
def del_object_from_parent(self): """ Delete object from parent object. """ if self.parent: self.parent.objects.pop(self.ref)
[ "def", "del_object_from_parent", "(", "self", ")", ":", "if", "self", ".", "parent", ":", "self", ".", "parent", ".", "objects", ".", "pop", "(", "self", ".", "ref", ")" ]
Delete object from parent object.
[ "Delete", "object", "from", "parent", "object", "." ]
python
train
37.5
littlemo/moear-spider-zhihudaily
moear_spider_zhihudaily/spiders/zhihu_daily.py
https://github.com/littlemo/moear-spider-zhihudaily/blob/1e4e60b547afe3e2fbb3bbcb7d07a75dca608149/moear_spider_zhihudaily/spiders/zhihu_daily.py#L126-L166
def parse_post(self, response): ''' Fetches the article content from the concrete article URL generated in :meth:`.ZhihuDailySpider.parse`, formats it, and fills the result into the object attribute ``item_list`` :param Response response: the request response object passed in by ``Scrapy`` ''' content = json.loads(response.body.decode(), encoding='UTF-8') post = response.meta['post'] post['origin_url'] = content.get('share_url', '') if not all([post['origin_url']]): raise ValueError('原文地址为空') post['title'] = html.escape(content.get('title', '')) if not all([post['title']]): raise ValueError('文章标题为空 - {}'.format(post.get('origin_url'))) # Handle the special case of type == 1, i.e. the article is an off-site repost if content.get('type') == 1: self.logger.warn('遇到站外文章,单独处理 - {}'.format(post['title'])) return post soup = BeautifulSoup(content.get('body', ''), 'lxml') author_obj = soup.select('span.author') self.logger.debug(author_obj) if author_obj: author_list = [] for author in author_obj: author_list.append( author.string.rstrip(',, ').replace(',', ',')) author_list = list(set(author_list)) post['author'] = html.escape(','.join(author_list)) post['content'] = str(soup.div) # Continue filling in the post data image_back = content.get('images', [None])[0] if image_back: post['meta']['moear.cover_image_slug'] = \ content.get('image', image_back) self.logger.debug(post)
[ "def", "parse_post", "(", "self", ",", "response", ")", ":", "content", "=", "json", ".", "loads", "(", "response", ".", "body", ".", "decode", "(", ")", ",", "encoding", "=", "'UTF-8'", ")", "post", "=", "response", ".", "meta", "[", "'post'", "]", "post", "[", "'origin_url'", "]", "=", "content", ".", "get", "(", "'share_url'", ",", "''", ")", "if", "not", "all", "(", "[", "post", "[", "'origin_url'", "]", "]", ")", ":", "raise", "ValueError", "(", "'原文地址为空')", "", "post", "[", "'title'", "]", "=", "html", ".", "escape", "(", "content", ".", "get", "(", "'title'", ",", "''", ")", ")", "if", "not", "all", "(", "[", "post", "[", "'title'", "]", "]", ")", ":", "raise", "ValueError", "(", "'文章标题为空 - {}'.format(post", ".", "get('o", "r", "igin", "_", "url", "'", ")))", "", "", "", "# 单独处理type字段为1的情况,即该文章为站外转发文章", "if", "content", ".", "get", "(", "'type'", ")", "==", "1", ":", "self", ".", "logger", ".", "warn", "(", "'遇到站外文章,单独处理 - {}'.format(post['title'])", ")", "", "", "", "", "", "", "", "", "return", "post", "soup", "=", "BeautifulSoup", "(", "content", ".", "get", "(", "'body'", ",", "''", ")", ",", "'lxml'", ")", "author_obj", "=", "soup", ".", "select", "(", "'span.author'", ")", "self", ".", "logger", ".", "debug", "(", "author_obj", ")", "if", "author_obj", ":", "author_list", "=", "[", "]", "for", "author", "in", "author_obj", ":", "author_list", ".", "append", "(", "author", ".", "string", ".", "rstrip", "(", "',, ').", "r", "e", "place('", ",", "', ',", "'", ")", "", "", "author_list", "=", "list", "(", "set", "(", "author_list", ")", ")", "post", "[", "'author'", "]", "=", "html", ".", "escape", "(", "','.j", "o", "in(a", "u", "thor_list))", "", "", "post", "[", "'content'", "]", "=", "str", "(", "soup", ".", "div", ")", "# 继续填充post数据", "image_back", "=", "content", ".", "get", "(", "'images'", ",", "[", "None", "]", ")", "[", "0", "]", "if", "image_back", ":", "post", "[", "'meta'", "]", "[", "'moear.cover_image_slug'", "]", "=", "content", ".", "get", "(", "'image'", ",", "image_back", ")", "self", ".", "logger", ".", "debug", "(", "post", ")" ]
Fetches the article content from the concrete article URL generated in :meth:`.ZhihuDailySpider.parse`, formats it, and fills the result into the object attribute ``item_list`` :param Response response: the request response object passed in by ``Scrapy``
[ "根据", ":", "meth", ":", ".", "ZhihuDailySpider", ".", "parse", "中生成的具体文章地址,获取到文章内容,", "并对其进行格式化处理,结果填充到对象属性", "item_list", "中" ]
python
train
36.780488
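The author scrape selects span.author nodes, strips trailing full-width/ASCII commas and spaces, then de-duplicates. A reduced sketch — runs if BeautifulSoup and lxml are installed; the HTML is invented:

from bs4 import BeautifulSoup

body = ('<div><span class="author">Alice,</span><p>text</p>'
        '<span class="author">Bob, </span></div>')
soup = BeautifulSoup(body, 'lxml')

authors = []
for author in soup.select('span.author'):
    # rstrip drops trailing commas/spaces; replace converts full-width commas
    authors.append(author.string.rstrip(',, ').replace(',', ','))
authors = sorted(set(authors))   # the spider uses set() too; order isn't stable
print(','.join(authors))         # Alice,Bob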
marshmallow-code/marshmallow
src/marshmallow/schema.py
https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/schema.py#L416-L433
def _call_and_store(getter_func, data, field_name, error_store, index=None): """Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`. :param callable getter_func: Function for getting the serialized/deserialized value from ``data``. :param data: The data passed to ``getter_func``. :param str field_name: Field name. :param int index: Index of the item being validated, if validating a collection, otherwise `None`. """ try: value = getter_func(data) except ValidationError as err: error_store.store_error(err.messages, field_name, index=index) # When a Nested field fails validation, the marshalled data is stored # on the ValidationError's valid_data attribute return err.valid_data or missing return value
[ "def", "_call_and_store", "(", "getter_func", ",", "data", ",", "field_name", ",", "error_store", ",", "index", "=", "None", ")", ":", "try", ":", "value", "=", "getter_func", "(", "data", ")", "except", "ValidationError", "as", "err", ":", "error_store", ".", "store_error", "(", "err", ".", "messages", ",", "field_name", ",", "index", "=", "index", ")", "# When a Nested field fails validation, the marshalled data is stored", "# on the ValidationError's valid_data attribute", "return", "err", ".", "valid_data", "or", "missing", "return", "value" ]
Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`. :param callable getter_func: Function for getting the serialized/deserialized value from ``data``. :param data: The data passed to ``getter_func``. :param str field_name: Field name. :param int index: Index of the item being validated, if validating a collection, otherwise `None`.
[ "Call", "getter_func", "with", "data", "as", "its", "argument", "and", "store", "any", "ValidationErrors", "." ]
python
train
48.888889
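The helper wraps one conversion in a try/except, files the messages with the error store, and falls back to the error's partially-valid data or a missing sentinel. A self-contained mock of that flow (the sentinel and classes below are stand-ins, not marshmallow's):

missing = object()   # stand-in for marshmallow's missing sentinel

class ValidationError(Exception):
    def __init__(self, messages, valid_data=None):
        super().__init__(messages)
        self.messages = messages
        self.valid_data = valid_data

class ErrorStore:
    def __init__(self):
        self.errors = {}
    def store_error(self, messages, field_name, index=None):
        key = field_name if index is None else (index, field_name)
        self.errors.setdefault(key, []).extend(messages)

def call_and_store(getter_func, data, field_name, error_store, index=None):
    try:
        return getter_func(data)
    except ValidationError as err:
        error_store.store_error(err.messages, field_name, index=index)
        return err.valid_data or missing

def bad_int(value):
    raise ValidationError(['Not a valid integer.'])

store = ErrorStore()
print(call_and_store(bad_int, 'x', 'age', store) is missing)   # True
print(store.errors)   # {'age': ['Not a valid integer.']}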
ibis-project/ibis
ibis/bigquery/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/bigquery/client.py#L95-L98
def fetchall(self): """Fetch all rows.""" result = self.query.result() return [row.values() for row in result]
[ "def", "fetchall", "(", "self", ")", ":", "result", "=", "self", ".", "query", ".", "result", "(", ")", "return", "[", "row", ".", "values", "(", ")", "for", "row", "in", "result", "]" ]
Fetch all rows.
[ "Fetch", "all", "rows", "." ]
python
train
32.75
Opentrons/opentrons
api/src/opentrons/deck_calibration/endpoints.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/deck_calibration/endpoints.py#L541-L578
async def start(request): """ Begins the session manager for factory calibration, if a session is not already in progress, or if the "force" key is specified in the request. To force, use the following body: { "force": true } :return: The current session ID token or an error message """ global session try: body = await request.json() except json.decoder.JSONDecodeError: # Body will be null for requests without parameters (normal operation) log.debug("No body in {}".format(request)) body = {} if not session or body.get('force'): hardware = hw_from_req(request) if body.get('force') and session: await release(data={}) session = SessionManager(hardware) res = init_pipette() if res: status = 201 data = {'token': session.id, 'pipette': res} else: session = None status = 403 data = {'message': 'Error, pipette not recognized'} else: data = {'message': 'Error, session in progress. Use "force" key in' ' request body to override'} status = 409 return web.json_response(data, status=status)
[ "async", "def", "start", "(", "request", ")", ":", "global", "session", "try", ":", "body", "=", "await", "request", ".", "json", "(", ")", "except", "json", ".", "decoder", ".", "JSONDecodeError", ":", "# Body will be null for requests without parameters (normal operation)", "log", ".", "debug", "(", "\"No body in {}\"", ".", "format", "(", "request", ")", ")", "body", "=", "{", "}", "if", "not", "session", "or", "body", ".", "get", "(", "'force'", ")", ":", "hardware", "=", "hw_from_req", "(", "request", ")", "if", "body", ".", "get", "(", "'force'", ")", "and", "session", ":", "await", "release", "(", "data", "=", "{", "}", ")", "session", "=", "SessionManager", "(", "hardware", ")", "res", "=", "init_pipette", "(", ")", "if", "res", ":", "status", "=", "201", "data", "=", "{", "'token'", ":", "session", ".", "id", ",", "'pipette'", ":", "res", "}", "else", ":", "session", "=", "None", "status", "=", "403", "data", "=", "{", "'message'", ":", "'Error, pipette not recognized'", "}", "else", ":", "data", "=", "{", "'message'", ":", "'Error, session in progress. Use \"force\" key in'", "' request body to override'", "}", "status", "=", "409", "return", "web", ".", "json_response", "(", "data", ",", "status", "=", "status", ")" ]
Begins the session manager for factory calibration, if a session is not already in progress, or if the "force" key is specified in the request. To force, use the following body: { "force": true } :return: The current session ID token or an error message
[ "Begins", "the", "session", "manager", "for", "factory", "calibration", "if", "a", "session", "is", "not", "already", "in", "progress", "or", "if", "the", "force", "key", "is", "specified", "in", "the", "request", ".", "To", "force", "use", "the", "following", "body", ":", "{", "force", ":", "true", "}", ":", "return", ":", "The", "current", "session", "ID", "token", "or", "an", "error", "message" ]
python
train
31.973684
googleapis/protoc-java-resource-names-plugin
plugin/utils/path_template.py
https://github.com/googleapis/protoc-java-resource-names-plugin/blob/3fb2ec9b778f62646c05a7b960c893464c7791c0/plugin/utils/path_template.py#L190-L211
def parse(self, data): """Returns a list of path template segments parsed from data. Args: data: A path template string. Returns: A list of _Segment. """ self.binding_var_count = 0 self.segment_count = 0 segments = self.parser.parse(data) # Validation step: checks that there are no nested bindings. path_wildcard = False for segment in segments: if segment.kind == _TERMINAL and segment.literal == '**': if path_wildcard: raise ValidationException( 'validation error: path template cannot contain more ' 'than one path wildcard') path_wildcard = True return segments
[ "def", "parse", "(", "self", ",", "data", ")", ":", "self", ".", "binding_var_count", "=", "0", "self", ".", "segment_count", "=", "0", "segments", "=", "self", ".", "parser", ".", "parse", "(", "data", ")", "# Validation step: checks that there are no nested bindings.", "path_wildcard", "=", "False", "for", "segment", "in", "segments", ":", "if", "segment", ".", "kind", "==", "_TERMINAL", "and", "segment", ".", "literal", "==", "'**'", ":", "if", "path_wildcard", ":", "raise", "ValidationException", "(", "'validation error: path template cannot contain more '", "'than one path wildcard'", ")", "path_wildcard", "=", "True", "return", "segments" ]
Returns a list of path template segments parsed from data. Args: data: A path template string. Returns: A list of _Segment.
[ "Returns", "a", "list", "of", "path", "template", "segments", "parsed", "from", "data", "." ]
python
train
35.090909
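The post-parse validation enforces a single rule: at most one '**' path wildcard per template. That check in isolation, with (kind, literal) tuples standing in for _Segment objects:

segments = [('TERMINAL', 'shelves'), ('TERMINAL', '**'),
            ('TERMINAL', 'books'), ('TERMINAL', '**')]   # a doubly-wild template

try:
    path_wildcard = False
    for kind, literal in segments:
        if kind == 'TERMINAL' and literal == '**':
            if path_wildcard:
                raise ValueError('validation error: path template cannot '
                                 'contain more than one path wildcard')
            path_wildcard = True
    print('template ok')
except ValueError as exc:
    print(exc)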
inveniosoftware/invenio-oauth2server
invenio_oauth2server/provider.py
https://github.com/inveniosoftware/invenio-oauth2server/blob/7033d3495c1a2b830e101e43918e92a37bbb49f2/invenio_oauth2server/provider.py#L75-L88
def get_client(client_id): """Load the client. Needed for grant_type client_credentials. Add support for OAuth client_credentials access type, with user inactivation support. :param client_id: The client ID. :returns: The client instance or ``None``. """ client = Client.query.get(client_id) if client and client.user.active: return client
[ "def", "get_client", "(", "client_id", ")", ":", "client", "=", "Client", ".", "query", ".", "get", "(", "client_id", ")", "if", "client", "and", "client", ".", "user", ".", "active", ":", "return", "client" ]
Load the client. Needed for grant_type client_credentials. Add support for OAuth client_credentials access type, with user inactivation support. :param client_id: The client ID. :returns: The client instance or ``None``.
[ "Load", "the", "client", "." ]
python
train
26.642857
geertj/gruvi
lib/gruvi/dbus.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/dbus.py#L168-L174
def getMechanismName(self): """Return the authentication mechanism name.""" if self._server_side: mech = self._authenticator.current_mech return mech.getMechanismName() if mech else None else: return getattr(self._authenticator, 'authMech', None)
[ "def", "getMechanismName", "(", "self", ")", ":", "if", "self", ".", "_server_side", ":", "mech", "=", "self", ".", "_authenticator", ".", "current_mech", "return", "mech", ".", "getMechanismName", "(", ")", "if", "mech", "else", "None", "else", ":", "return", "getattr", "(", "self", ".", "_authenticator", ",", "'authMech'", ",", "None", ")" ]
Return the authentication mechanism name.
[ "Return", "the", "authentication", "mechanism", "name", "." ]
python
train
42.857143
frejanordsiek/hdf5storage
hdf5storage/utilities.py
https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/utilities.py#L548-L654
def write_object_array(f, data, options): """ Writes an array of objects recursively. Writes the elements of the given object array recursively in the HDF5 Group ``options.group_for_references`` and returns an ``h5py.Reference`` array to all the elements. Parameters ---------- f : h5py.File The HDF5 file handle that is open. data : numpy.ndarray of objects Numpy object array to write the elements of. options : hdf5storage.core.Options hdf5storage options object. Returns ------- obj_array : numpy.ndarray of h5py.Reference A reference array pointing to all the elements written to the HDF5 file. For those that couldn't be written, the respective element points to the canonical empty. Raises ------ TypeNotMatlabCompatibleError If writing a type not compatible with MATLAB and `options.action_for_matlab_incompatible` is set to ``'error'``. See Also -------- read_object_array hdf5storage.Options.group_for_references h5py.Reference """ # We need to grab the special reference dtype and make an empty # array to store all the references in. ref_dtype = h5py.special_dtype(ref=h5py.Reference) data_refs = np.zeros(shape=data.shape, dtype='object') # We need to make sure that the group to hold references is present, # and create it if it isn't. if options.group_for_references not in f: f.create_group(options.group_for_references) grp2 = f[options.group_for_references] if not isinstance(grp2, h5py.Group): del f[options.group_for_references] f.create_group(options.group_for_references) grp2 = f[options.group_for_references] # The Dataset 'a' needs to be present as the canonical empty. It is # just and np.uint32/64([0, 0]) with its a MATLAB_class of # 'canonical empty' and the 'MATLAB_empty' attribute set. If it # isn't present or is incorrectly formatted, it is created # truncating anything previously there. try: dset_a = grp2['a'] if dset_a.shape != (2,) \ or not dset_a.dtype.name.startswith('uint') \ or np.any(dset_a[...] != np.uint64([0, 0])) \ or get_attribute_string(dset_a, 'MATLAB_class') != \ 'canonical empty' \ or get_attribute(dset_a, 'MATLAB_empty') != 1: del grp2['a'] dset_a = grp2.create_dataset('a', data=np.uint64([0, 0])) set_attribute_string(dset_a, 'MATLAB_class', 'canonical empty') set_attribute(dset_a, 'MATLAB_empty', np.uint8(1)) except: dset_a = grp2.create_dataset('a', data=np.uint64([0, 0])) set_attribute_string(dset_a, 'MATLAB_class', 'canonical empty') set_attribute(dset_a, 'MATLAB_empty', np.uint8(1)) # Go through all the elements of data and write them, gabbing their # references and putting them in data_refs. They will be put in # group_for_references, which is also what the H5PATH needs to be # set to if we are doing MATLAB compatibility (otherwise, the # attribute needs to be deleted). If an element can't be written # (doing matlab compatibility, but it isn't compatible with matlab # and action_for_matlab_incompatible option is True), the reference # to the canonical empty will be used for the reference array to # point to. grp2name = grp2.name for index, x in np.ndenumerate(data): name_for_ref = next_unused_name_in_group(grp2, 16) write_data(f, grp2, name_for_ref, x, None, options) try: dset = grp2[name_for_ref] data_refs[index] = dset.ref if options.matlab_compatible: set_attribute_string(dset, 'H5PATH', grp2name) else: del_attribute(dset, 'H5PATH') except: data_refs[index] = dset_a.ref # Now, the dtype needs to be changed to the reference type and the # whole thing copied over to data_to_store. return data_refs.astype(ref_dtype).copy()
[ "def", "write_object_array", "(", "f", ",", "data", ",", "options", ")", ":", "# We need to grab the special reference dtype and make an empty", "# array to store all the references in.", "ref_dtype", "=", "h5py", ".", "special_dtype", "(", "ref", "=", "h5py", ".", "Reference", ")", "data_refs", "=", "np", ".", "zeros", "(", "shape", "=", "data", ".", "shape", ",", "dtype", "=", "'object'", ")", "# We need to make sure that the group to hold references is present,", "# and create it if it isn't.", "if", "options", ".", "group_for_references", "not", "in", "f", ":", "f", ".", "create_group", "(", "options", ".", "group_for_references", ")", "grp2", "=", "f", "[", "options", ".", "group_for_references", "]", "if", "not", "isinstance", "(", "grp2", ",", "h5py", ".", "Group", ")", ":", "del", "f", "[", "options", ".", "group_for_references", "]", "f", ".", "create_group", "(", "options", ".", "group_for_references", ")", "grp2", "=", "f", "[", "options", ".", "group_for_references", "]", "# The Dataset 'a' needs to be present as the canonical empty. It is", "# just and np.uint32/64([0, 0]) with its a MATLAB_class of", "# 'canonical empty' and the 'MATLAB_empty' attribute set. If it", "# isn't present or is incorrectly formatted, it is created", "# truncating anything previously there.", "try", ":", "dset_a", "=", "grp2", "[", "'a'", "]", "if", "dset_a", ".", "shape", "!=", "(", "2", ",", ")", "or", "not", "dset_a", ".", "dtype", ".", "name", ".", "startswith", "(", "'uint'", ")", "or", "np", ".", "any", "(", "dset_a", "[", "...", "]", "!=", "np", ".", "uint64", "(", "[", "0", ",", "0", "]", ")", ")", "or", "get_attribute_string", "(", "dset_a", ",", "'MATLAB_class'", ")", "!=", "'canonical empty'", "or", "get_attribute", "(", "dset_a", ",", "'MATLAB_empty'", ")", "!=", "1", ":", "del", "grp2", "[", "'a'", "]", "dset_a", "=", "grp2", ".", "create_dataset", "(", "'a'", ",", "data", "=", "np", ".", "uint64", "(", "[", "0", ",", "0", "]", ")", ")", "set_attribute_string", "(", "dset_a", ",", "'MATLAB_class'", ",", "'canonical empty'", ")", "set_attribute", "(", "dset_a", ",", "'MATLAB_empty'", ",", "np", ".", "uint8", "(", "1", ")", ")", "except", ":", "dset_a", "=", "grp2", ".", "create_dataset", "(", "'a'", ",", "data", "=", "np", ".", "uint64", "(", "[", "0", ",", "0", "]", ")", ")", "set_attribute_string", "(", "dset_a", ",", "'MATLAB_class'", ",", "'canonical empty'", ")", "set_attribute", "(", "dset_a", ",", "'MATLAB_empty'", ",", "np", ".", "uint8", "(", "1", ")", ")", "# Go through all the elements of data and write them, gabbing their", "# references and putting them in data_refs. They will be put in", "# group_for_references, which is also what the H5PATH needs to be", "# set to if we are doing MATLAB compatibility (otherwise, the", "# attribute needs to be deleted). 
If an element can't be written", "# (doing matlab compatibility, but it isn't compatible with matlab", "# and action_for_matlab_incompatible option is True), the reference", "# to the canonical empty will be used for the reference array to", "# point to.", "grp2name", "=", "grp2", ".", "name", "for", "index", ",", "x", "in", "np", ".", "ndenumerate", "(", "data", ")", ":", "name_for_ref", "=", "next_unused_name_in_group", "(", "grp2", ",", "16", ")", "write_data", "(", "f", ",", "grp2", ",", "name_for_ref", ",", "x", ",", "None", ",", "options", ")", "try", ":", "dset", "=", "grp2", "[", "name_for_ref", "]", "data_refs", "[", "index", "]", "=", "dset", ".", "ref", "if", "options", ".", "matlab_compatible", ":", "set_attribute_string", "(", "dset", ",", "'H5PATH'", ",", "grp2name", ")", "else", ":", "del_attribute", "(", "dset", ",", "'H5PATH'", ")", "except", ":", "data_refs", "[", "index", "]", "=", "dset_a", ".", "ref", "# Now, the dtype needs to be changed to the reference type and the", "# whole thing copied over to data_to_store.", "return", "data_refs", ".", "astype", "(", "ref_dtype", ")", ".", "copy", "(", ")" ]
Writes an array of objects recursively. Writes the elements of the given object array recursively in the HDF5 Group ``options.group_for_references`` and returns an ``h5py.Reference`` array to all the elements. Parameters ---------- f : h5py.File The HDF5 file handle that is open. data : numpy.ndarray of objects Numpy object array to write the elements of. options : hdf5storage.core.Options hdf5storage options object. Returns ------- obj_array : numpy.ndarray of h5py.Reference A reference array pointing to all the elements written to the HDF5 file. For those that couldn't be written, the respective element points to the canonical empty. Raises ------ TypeNotMatlabCompatibleError If writing a type not compatible with MATLAB and `options.action_for_matlab_incompatible` is set to ``'error'``. See Also -------- read_object_array hdf5storage.Options.group_for_references h5py.Reference
[ "Writes", "an", "array", "of", "objects", "recursively", "." ]
python
train
38.878505
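The h5py moves underneath — a special reference dtype, one dataset per element under a references group, then a dataset of references pointing back at them — fit in a few lines. A miniature that runs if h5py is installed (file and group names are invented):

import h5py
import numpy as np

ref_dtype = h5py.special_dtype(ref=h5py.Reference)

with h5py.File('refs_demo.h5', 'w') as f:
    grp = f.create_group('#refs#')          # stand-in for group_for_references
    cells = [np.arange(3), np.arange(5.0)]
    refs = np.zeros((len(cells),), dtype=ref_dtype)
    for i, arr in enumerate(cells):
        dset = grp.create_dataset('element_%d' % i, data=arr)
        refs[i] = dset.ref                  # object reference to that element
    f.create_dataset('object_array', data=refs, dtype=ref_dtype)
    # Dereference: indexing the file with a stored reference yields the dataset.
    print(f[f['object_array'][0]][...])     # [0 1 2]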
googledatalab/pydatalab
solutionbox/image_classification/mltoolbox/image/classification/_api.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_api.py#L89-L97
def train(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None): """Blocking version of train_async(). The only difference is that it blocks the caller until the job finishes, and it does not have a return value. """ with warnings.catch_warnings(): warnings.simplefilter("ignore") job = train_async(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud) job.wait() print(job.state)
[ "def", "train", "(", "input_dir", ",", "batch_size", ",", "max_steps", ",", "output_dir", ",", "checkpoint", "=", "None", ",", "cloud", "=", "None", ")", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "job", "=", "train_async", "(", "input_dir", ",", "batch_size", ",", "max_steps", ",", "output_dir", ",", "checkpoint", ",", "cloud", ")", "job", ".", "wait", "(", ")", "print", "(", "job", ".", "state", ")" ]
Blocking version of train_async(). The only difference is that it blocks the caller until the job finishes, and it does not have a return value.
[ "Blocking", "version", "of", "train_async", "()", ".", "The", "only", "difference", "is", "that", "it", "blocks", "the", "caller", "until", "the", "job", "finishes", "and", "it", "does", "not", "have", "a", "return", "value", "." ]
python
train
47.888889
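train() is a plain sync-over-async shim: submit, wait, print the terminal state, return nothing. The same wrapper shape with a dummy job handle:

import time

class DummyJob:
    """Minimal stand-in for the asynchronous training job handle."""
    def __init__(self):
        self.state = 'RUNNING'
    def wait(self):
        time.sleep(0.1)         # pretend the remote job is wrapping up
        self.state = 'SUCCEEDED'

def train_async(*args, **kwargs):
    return DummyJob()           # the real one submits work and returns at once

def train(*args, **kwargs):
    job = train_async(*args, **kwargs)
    job.wait()                  # block the caller; no return value by design
    print(job.state)

train()                         # -> SUCCEEDED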
belbio/bel
bel/lang/semantics.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/semantics.py#L239-L370
def validate_arg_values(ast, bo): """Recursively validate arg (NSArg and StrArg) values Check that NSArgs are found in BELbio API and match appropriate entity_type. Check that StrArgs match their value - either default namespace or regex string Generate a WARNING if not. Args: bo: bel object Returns: bel object """ if not bo.api_url: log.info("No API endpoint defined") return bo log.debug(f"AST: {ast}") # Test NSArg terms if isinstance(ast, NSArg): term_id = "{}:{}".format(ast.namespace, ast.value) value_types = ast.value_types log.debug(f"Value types: {value_types} AST value: {ast.value}") # Default namespaces are defined in the bel_specification file if ast.namespace == "DEFAULT": # may use the DEFAULT namespace or not for value_type in value_types: default_namespace = [ ns["name"] for ns in bo.spec["namespaces"][value_type]["info"] ] + [ ns["abbreviation"] for ns in bo.spec["namespaces"][value_type]["info"] ] if ast.value in default_namespace: log.debug("Default namespace valid term: {}".format(term_id)) break else: # if for loop doesn't hit the break, run this else log.debug("Default namespace invalid term: {}".format(term_id)) bo.validation_messages.append( ("WARNING", f"Default Term: {term_id} not found") ) # Process normal, non-default-namespace terms else: request_url = bo.api_url + "/terms/{}".format( url_path_param_quoting(term_id) ) log.info(f"Validate Arg Values url {request_url}") r = get_url(request_url) if r and r.status_code == 200: result = r.json() # function signature term value_types doesn't match up with API term entity_types log.debug( f'AST.value_types {ast.value_types} Entity types {result.get("entity_types", [])}' ) # Check that entity types match if ( len( set(ast.value_types).intersection( result.get("entity_types", []) ) ) == 0 ): log.debug( "Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format( term_id, ast.value_types, result.get("entity_types", []) ) ) bo.validation_messages.append( ( "WARNING", "Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format( term_id, ast.value_types, result.get("entity_types", []) ), ) ) if term_id in result.get("obsolete_ids", []): bo.validation_messages.append( ( "WARNING", f'Obsolete term: {term_id} Current term: {result["id"]}', ) ) elif r.status_code == 404: bo.validation_messages.append( ("WARNING", f"Term: {term_id} not found in namespace") ) else: log.error(f"Status {r.status_code} - Bad URL: {request_url}") # Process StrArgs if isinstance(ast, StrArg): log.debug(f" Check String Arg: {ast.value} {ast.value_types}") for value_type in ast.value_types: # Is this a regex to match against if re.match("/", value_type): value_type = re.sub("^/", "", value_type) value_type = re.sub("/$", "", value_type) match = re.match(value_type, ast.value) if match: break if value_type in bo.spec["namespaces"]: default_namespace = [ ns["name"] for ns in bo.spec["namespaces"][value_type]["info"] ] + [ ns["abbreviation"] for ns in bo.spec["namespaces"][value_type]["info"] ] if ast.value in default_namespace: break else: # If for loop doesn't hit the break, no matches found, therefore for StrArg value is bad bo.validation_messages.append( ( "WARNING", f"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}", ) ) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: validate_arg_values(arg, bo) return bo
[ "def", "validate_arg_values", "(", "ast", ",", "bo", ")", ":", "if", "not", "bo", ".", "api_url", ":", "log", ".", "info", "(", "\"No API endpoint defined\"", ")", "return", "bo", "log", ".", "debug", "(", "f\"AST: {ast}\"", ")", "# Test NSArg terms", "if", "isinstance", "(", "ast", ",", "NSArg", ")", ":", "term_id", "=", "\"{}:{}\"", ".", "format", "(", "ast", ".", "namespace", ",", "ast", ".", "value", ")", "value_types", "=", "ast", ".", "value_types", "log", ".", "debug", "(", "f\"Value types: {value_types} AST value: {ast.value}\"", ")", "# Default namespaces are defined in the bel_specification file", "if", "ast", ".", "namespace", "==", "\"DEFAULT\"", ":", "# may use the DEFAULT namespace or not", "for", "value_type", "in", "value_types", ":", "default_namespace", "=", "[", "ns", "[", "\"name\"", "]", "for", "ns", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", "[", "value_type", "]", "[", "\"info\"", "]", "]", "+", "[", "ns", "[", "\"abbreviation\"", "]", "for", "ns", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", "[", "value_type", "]", "[", "\"info\"", "]", "]", "if", "ast", ".", "value", "in", "default_namespace", ":", "log", ".", "debug", "(", "\"Default namespace valid term: {}\"", ".", "format", "(", "term_id", ")", ")", "break", "else", ":", "# if for loop doesn't hit the break, run this else", "log", ".", "debug", "(", "\"Default namespace invalid term: {}\"", ".", "format", "(", "term_id", ")", ")", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "f\"Default Term: {term_id} not found\"", ")", ")", "# Process normal, non-default-namespace terms", "else", ":", "request_url", "=", "bo", ".", "api_url", "+", "\"/terms/{}\"", ".", "format", "(", "url_path_param_quoting", "(", "term_id", ")", ")", "log", ".", "info", "(", "f\"Validate Arg Values url {request_url}\"", ")", "r", "=", "get_url", "(", "request_url", ")", "if", "r", "and", "r", ".", "status_code", "==", "200", ":", "result", "=", "r", ".", "json", "(", ")", "# function signature term value_types doesn't match up with API term entity_types", "log", ".", "debug", "(", "f'AST.value_types {ast.value_types} Entity types {result.get(\"entity_types\", [])}'", ")", "# Check that entity types match", "if", "(", "len", "(", "set", "(", "ast", ".", "value_types", ")", ".", "intersection", "(", "result", ".", "get", "(", "\"entity_types\"", ",", "[", "]", ")", ")", ")", "==", "0", ")", ":", "log", ".", "debug", "(", "\"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}\"", ".", "format", "(", "term_id", ",", "ast", ".", "value_types", ",", "result", ".", "get", "(", "\"entity_types\"", ",", "[", "]", ")", ")", ")", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "\"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}\"", ".", "format", "(", "term_id", ",", "ast", ".", "value_types", ",", "result", ".", "get", "(", "\"entity_types\"", ",", "[", "]", ")", ")", ",", ")", ")", "if", "term_id", "in", "result", ".", "get", "(", "\"obsolete_ids\"", ",", "[", "]", ")", ":", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "f'Obsolete term: {term_id} Current term: {result[\"id\"]}'", ",", ")", ")", "elif", "r", ".", "status_code", "==", "404", ":", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "f\"Term: {term_id} not found in namespace\"", ")", ")", "else", ":", "log", ".", "error", "(", "f\"Status 
{r.status_code} - Bad URL: {request_url}\"", ")", "# Process StrArgs", "if", "isinstance", "(", "ast", ",", "StrArg", ")", ":", "log", ".", "debug", "(", "f\" Check String Arg: {ast.value} {ast.value_types}\"", ")", "for", "value_type", "in", "ast", ".", "value_types", ":", "# Is this a regex to match against", "if", "re", ".", "match", "(", "\"/\"", ",", "value_type", ")", ":", "value_type", "=", "re", ".", "sub", "(", "\"^/\"", ",", "\"\"", ",", "value_type", ")", "value_type", "=", "re", ".", "sub", "(", "\"/$\"", ",", "\"\"", ",", "value_type", ")", "match", "=", "re", ".", "match", "(", "value_type", ",", "ast", ".", "value", ")", "if", "match", ":", "break", "if", "value_type", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", ":", "default_namespace", "=", "[", "ns", "[", "\"name\"", "]", "for", "ns", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", "[", "value_type", "]", "[", "\"info\"", "]", "]", "+", "[", "ns", "[", "\"abbreviation\"", "]", "for", "ns", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", "[", "value_type", "]", "[", "\"info\"", "]", "]", "if", "ast", ".", "value", "in", "default_namespace", ":", "break", "else", ":", "# If for loop doesn't hit the break, no matches found, therefore for StrArg value is bad", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "f\"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}\"", ",", ")", ")", "# Recursively process every NSArg by processing BELAst and Functions", "if", "hasattr", "(", "ast", ",", "\"args\"", ")", ":", "for", "arg", "in", "ast", ".", "args", ":", "validate_arg_values", "(", "arg", ",", "bo", ")", "return", "bo" ]
Recursively validate arg (NSArg and StrArg) values Check that NSArgs are found in BELbio API and match appropriate entity_type. Check that StrArgs match their value - either default namespace or regex string Generate a WARNING if not. Args: bo: bel object Returns: bel object
[ "Recursively", "validate", "arg", "(", "NSArg", "and", "StrArg", ")", "values" ]
python
train
38.924242
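Editor's note: the validator above leans on Python's for/else clause for its default-namespace check. A minimal self-contained sketch of that pattern follows; the spec data is invented for illustration and is not real bel_specification content.

spec = {"namespaces": {"Activity": {"info": [
    {"name": "kinaseActivity", "abbreviation": "kin"},
]}}}

def check_default_term(value, value_types, spec, messages):
    for value_type in value_types:
        info = spec["namespaces"][value_type]["info"]
        allowed = [ns["name"] for ns in info] + [ns["abbreviation"] for ns in info]
        if value in allowed:
            break  # a match short-circuits the search and skips the else
    else:
        # runs only when no break fired, i.e. no value_type matched
        messages.append(("WARNING", f"Default Term: {value} not found"))

msgs = []
check_default_term("kin", ["Activity"], spec, msgs)
check_default_term("bogus", ["Activity"], spec, msgs)
print(msgs)  # [('WARNING', 'Default Term: bogus not found')]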
tanghaibao/goatools
goatools/parsers/david_chart.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/parsers/david_chart.py#L91-L100
def get_num_sig(self, alpha=0.05): """Print the number of significant results using various metrics.""" # Get the number of significant GO terms ctr = cx.Counter() flds = set(['FDR', 'Bonferroni', 'Benjamini', 'PValue']) for ntd in self.nts: for fld in flds: if getattr(ntd, fld) < alpha: ctr[fld] += 1 return ctr
[ "def", "get_num_sig", "(", "self", ",", "alpha", "=", "0.05", ")", ":", "# Get the number of significant GO terms", "ctr", "=", "cx", ".", "Counter", "(", ")", "flds", "=", "set", "(", "[", "'FDR'", ",", "'Bonferroni'", ",", "'Benjamini'", ",", "'PValue'", "]", ")", "for", "ntd", "in", "self", ".", "nts", ":", "for", "fld", "in", "flds", ":", "if", "getattr", "(", "ntd", ",", "fld", ")", "<", "alpha", ":", "ctr", "[", "fld", "]", "+=", "1", "return", "ctr" ]
Print the number of significant results using various metrics.
[ "Print", "the", "number", "of", "significant", "results", "using", "various", "metrics", "." ]
python
train
40
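Editor's note: a toy reproduction of the Counter-based tally above, runnable on its own; the p-value records are invented stand-ins for the parser's namedtuples.

import collections as cx
from collections import namedtuple

Nt = namedtuple("Nt", "FDR Bonferroni Benjamini PValue")
nts = [Nt(0.01, 0.20, 0.04, 0.001), Nt(0.30, 0.02, 0.90, 0.049)]

ctr = cx.Counter()
for ntd in nts:
    for fld in ("FDR", "Bonferroni", "Benjamini", "PValue"):
        if getattr(ntd, fld) < 0.05:  # count each metric below alpha
            ctr[fld] += 1
print(ctr)  # Counter({'PValue': 2, 'FDR': 1, 'Benjamini': 1, 'Bonferroni': 1})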
joshua-stone/DerPyBooru
derpibooru/search.py
https://github.com/joshua-stone/DerPyBooru/blob/75aec19488042ba89115ff002b4d696ad87fb03f/derpibooru/search.py#L135-L142
def limit(self, limit): """ Set absolute limit on number of images to return, or set to None to return as many results as needed; default 50 posts. """ params = join_params(self.parameters, {"limit": limit}) return self.__class__(**params)
[ "def", "limit", "(", "self", ",", "limit", ")", ":", "params", "=", "join_params", "(", "self", ".", "parameters", ",", "{", "\"limit\"", ":", "limit", "}", ")", "return", "self", ".", "__class__", "(", "*", "*", "params", ")" ]
Set absolute limit on number of images to return, or set to None to return as many results as needed; default 50 posts.
[ "Set", "absolute", "limit", "on", "number", "of", "images", "to", "return", "or", "set", "to", "None", "to", "return", "as", "many", "results", "as", "needed", ";", "default", "50", "posts", "." ]
python
train
32.125
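Editor's note: the `return self.__class__(**params)` idiom makes each setter produce a new, immutable query object. A hypothetical reconstruction of that style follows; the Search class and its fields are stand-ins, not the real derpibooru API.

class Search:
    def __init__(self, q="", limit=50):
        self._params = {"q": q, "limit": limit}

    def limit(self, limit):
        # every setter returns a fresh instance, so earlier queries
        # are never mutated by later chained calls
        return self.__class__(**{**self._params, "limit": limit})

s1 = Search(q="pony")
s2 = s1.limit(None)  # None means "as many results as needed"
print(s1._params["limit"], s2._params["limit"])  # 50 None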
hubo1016/namedstruct
namedstruct/namedstruct.py
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L226-L240
def _gettype(self): ''' Return current type of this struct :returns: a typedef object (e.g. nstruct) ''' current = self lastname = getattr(current._parser, 'typedef', None) while hasattr(current, '_sub'): current = current._sub tn = getattr(current._parser, 'typedef', None) if tn is not None: lastname = tn return lastname
[ "def", "_gettype", "(", "self", ")", ":", "current", "=", "self", "lastname", "=", "getattr", "(", "current", ".", "_parser", ",", "'typedef'", ",", "None", ")", "while", "hasattr", "(", "current", ",", "'_sub'", ")", ":", "current", "=", "current", ".", "_sub", "tn", "=", "getattr", "(", "current", ".", "_parser", ",", "'typedef'", ",", "None", ")", "if", "tn", "is", "not", "None", ":", "lastname", "=", "tn", "return", "lastname" ]
Return current type of this struct :returns: a typedef object (e.g. nstruct)
[ "Return", "current", "type", "of", "this", "struct", ":", "returns", ":", "a", "typedef", "object", "(", "e", ".", "g", ".", "nstruct", ")" ]
python
train
29.533333
wecatch/app-turbo
turbo/app.py
https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/app.py#L307-L314
def wo_resp(self, resp): """ can override for other style """ if self._data is not None: resp['res'] = self.to_str(self._data) return self.wo_json(resp)
[ "def", "wo_resp", "(", "self", ",", "resp", ")", ":", "if", "self", ".", "_data", "is", "not", "None", ":", "resp", "[", "'res'", "]", "=", "self", ".", "to_str", "(", "self", ".", "_data", ")", "return", "self", ".", "wo_json", "(", "resp", ")" ]
can override for other style
[ "can", "override", "for", "other", "style" ]
python
train
24.75
kakwa/ldapcherry
ldapcherry/backend/backendLdap.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/backend/backendLdap.py#L469-L515
def set_attrs(self, username, attrs): """ set user attributes""" ldap_client = self._bind() tmp = self._get_user(self._byte_p2(username), ALL_ATTRS) if tmp is None: raise UserDoesntExist(username, self.backend_name) dn = self._byte_p2(tmp[0]) old_attrs = tmp[1] for attr in attrs: bcontent = self._byte_p2(attrs[attr]) battr = self._byte_p2(attr) new = {battr: self._modlist(self._byte_p3(bcontent))} # if attr is dn entry, use rename if attr.lower() == self.dn_user_attr.lower(): ldap_client.rename_s( dn, ldap.dn.dn2str([[(battr, bcontent, 1)]]) ) dn = ldap.dn.dn2str( [[(battr, bcontent, 1)]] + ldap.dn.str2dn(dn)[1:] ) else: # if attr is already set, replace the value # (see dict old passed to modifyModlist) if attr in old_attrs: if type(old_attrs[attr]) is list: tmp = [] for value in old_attrs[attr]: tmp.append(self._byte_p2(value)) bold_value = tmp else: bold_value = self._modlist( self._byte_p3(old_attrs[attr]) ) old = {battr: bold_value} # attribute is not set, just add it else: old = {} ldif = modlist.modifyModlist(old, new) if ldif: try: ldap_client.modify_s(dn, ldif) except Exception as e: ldap_client.unbind_s() self._exception_handler(e) ldap_client.unbind_s()
[ "def", "set_attrs", "(", "self", ",", "username", ",", "attrs", ")", ":", "ldap_client", "=", "self", ".", "_bind", "(", ")", "tmp", "=", "self", ".", "_get_user", "(", "self", ".", "_byte_p2", "(", "username", ")", ",", "ALL_ATTRS", ")", "if", "tmp", "is", "None", ":", "raise", "UserDoesntExist", "(", "username", ",", "self", ".", "backend_name", ")", "dn", "=", "self", ".", "_byte_p2", "(", "tmp", "[", "0", "]", ")", "old_attrs", "=", "tmp", "[", "1", "]", "for", "attr", "in", "attrs", ":", "bcontent", "=", "self", ".", "_byte_p2", "(", "attrs", "[", "attr", "]", ")", "battr", "=", "self", ".", "_byte_p2", "(", "attr", ")", "new", "=", "{", "battr", ":", "self", ".", "_modlist", "(", "self", ".", "_byte_p3", "(", "bcontent", ")", ")", "}", "# if attr is dn entry, use rename", "if", "attr", ".", "lower", "(", ")", "==", "self", ".", "dn_user_attr", ".", "lower", "(", ")", ":", "ldap_client", ".", "rename_s", "(", "dn", ",", "ldap", ".", "dn", ".", "dn2str", "(", "[", "[", "(", "battr", ",", "bcontent", ",", "1", ")", "]", "]", ")", ")", "dn", "=", "ldap", ".", "dn", ".", "dn2str", "(", "[", "[", "(", "battr", ",", "bcontent", ",", "1", ")", "]", "]", "+", "ldap", ".", "dn", ".", "str2dn", "(", "dn", ")", "[", "1", ":", "]", ")", "else", ":", "# if attr is already set, replace the value", "# (see dict old passed to modifyModlist)", "if", "attr", "in", "old_attrs", ":", "if", "type", "(", "old_attrs", "[", "attr", "]", ")", "is", "list", ":", "tmp", "=", "[", "]", "for", "value", "in", "old_attrs", "[", "attr", "]", ":", "tmp", ".", "append", "(", "self", ".", "_byte_p2", "(", "value", ")", ")", "bold_value", "=", "tmp", "else", ":", "bold_value", "=", "self", ".", "_modlist", "(", "self", ".", "_byte_p3", "(", "old_attrs", "[", "attr", "]", ")", ")", "old", "=", "{", "battr", ":", "bold_value", "}", "# attribute is not set, just add it", "else", ":", "old", "=", "{", "}", "ldif", "=", "modlist", ".", "modifyModlist", "(", "old", ",", "new", ")", "if", "ldif", ":", "try", ":", "ldap_client", ".", "modify_s", "(", "dn", ",", "ldif", ")", "except", "Exception", "as", "e", ":", "ldap_client", ".", "unbind_s", "(", ")", "self", ".", "_exception_handler", "(", "e", ")", "ldap_client", ".", "unbind_s", "(", ")" ]
set user attributes
[ "set", "user", "attributes" ]
python
train
40.361702
all-umass/graphs
graphs/base/base.py
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L150-L163
def to_graph_tool(self): '''Converts this Graph object to a graph_tool-compatible object. Requires the graph_tool library. Note that the internal ordering of graph_tool seems to be column-major.''' # Import here to avoid ImportErrors when graph_tool isn't available. import graph_tool gt = graph_tool.Graph(directed=self.is_directed()) gt.add_edge_list(self.pairs()) if self.is_weighted(): weights = gt.new_edge_property('double') for e,w in zip(gt.edges(), self.edge_weights()): weights[e] = w gt.edge_properties['weight'] = weights return gt
[ "def", "to_graph_tool", "(", "self", ")", ":", "# Import here to avoid ImportErrors when graph_tool isn't available.", "import", "graph_tool", "gt", "=", "graph_tool", ".", "Graph", "(", "directed", "=", "self", ".", "is_directed", "(", ")", ")", "gt", ".", "add_edge_list", "(", "self", ".", "pairs", "(", ")", ")", "if", "self", ".", "is_weighted", "(", ")", ":", "weights", "=", "gt", ".", "new_edge_property", "(", "'double'", ")", "for", "e", ",", "w", "in", "zip", "(", "gt", ".", "edges", "(", ")", ",", "self", ".", "edge_weights", "(", ")", ")", ":", "weights", "[", "e", "]", "=", "w", "gt", ".", "edge_properties", "[", "'weight'", "]", "=", "weights", "return", "gt" ]
Converts this Graph object to a graph_tool-compatible object. Requires the graph_tool library. Note that the internal ordering of graph_tool seems to be column-major.
[ "Converts", "this", "Graph", "object", "to", "a", "graph_tool", "-", "compatible", "object", ".", "Requires", "the", "graph_tool", "library", ".", "Note", "that", "the", "internal", "ordering", "of", "graph_tool", "seems", "to", "be", "column", "-", "major", "." ]
python
train
42.285714
kgori/treeCl
treeCl/tasks.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/tasks.py#L64-L71
def rfdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0): """ Distributed version of tree_distance.rfdist Parameters: two valid newick strings and a boolean """ tree_a = Tree(newick_string_a) tree_b = Tree(newick_string_b) return treedist.rfdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value)
[ "def", "rfdist_task", "(", "newick_string_a", ",", "newick_string_b", ",", "normalise", ",", "min_overlap", "=", "4", ",", "overlap_fail_value", "=", "0", ")", ":", "tree_a", "=", "Tree", "(", "newick_string_a", ")", "tree_b", "=", "Tree", "(", "newick_string_b", ")", "return", "treedist", ".", "rfdist", "(", "tree_a", ",", "tree_b", ",", "normalise", ",", "min_overlap", ",", "overlap_fail_value", ")" ]
Distributed version of tree_distance.rfdist Parameters: two valid newick strings and a boolean
[ "Distributed", "version", "of", "tree_distance", ".", "rfdist", "Parameters", ":", "two", "valid", "newick", "strings", "and", "a", "boolean" ]
python
train
45.875
daler/trackhub
trackhub/helpers.py
https://github.com/daler/trackhub/blob/e4655f79177822529f80b923df117e38e28df702/trackhub/helpers.py#L69-L97
def sanitize(s, strict=True): """ Sanitize a string. Spaces are converted to underscore; if strict=True they are then removed. Parameters ---------- s : str String to sanitize strict : bool If True, only alphanumeric characters are allowed. If False, a limited set of additional characters (-._) will be allowed. """ allowed = ''.join( [ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz', '0123456789', ] ) if not strict: allowed += '-_.' s = str(s).replace(' ', '_') return ''.join([i for i in s if i in allowed])
[ "def", "sanitize", "(", "s", ",", "strict", "=", "True", ")", ":", "allowed", "=", "''", ".", "join", "(", "[", "'ABCDEFGHIJKLMNOPQRSTUVWXYZ'", ",", "'abcdefghijklmnopqrstuvwxyz'", ",", "'0123456789'", ",", "]", ")", "if", "not", "strict", ":", "allowed", "+=", "'-_.'", "s", "=", "str", "(", "s", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "return", "''", ".", "join", "(", "[", "i", "for", "i", "in", "s", "if", "i", "in", "allowed", "]", ")" ]
Sanitize a string. Spaces are converted to underscore; if strict=True they are then removed. Parameters ---------- s : str String to sanitize strict : bool If True, only alphanumeric characters are allowed. If False, a limited set of additional characters (-._) will be allowed.
[ "Sanitize", "a", "string", "." ]
python
train
21.965517
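Editor's note: a compact restatement of the whitelist sanitizer using stdlib constants, with the documented behavior checked inline; this is a sketch of the same idea, not the trackhub function itself.

import string

def sanitize(s, strict=True):
    # same whitelist idea as above, built from string.ascii_letters/digits
    allowed = string.ascii_letters + string.digits + ("" if strict else "-_.")
    s = str(s).replace(" ", "_")
    return "".join(c for c in s if c in allowed)

print(sanitize("My Track 1!"))                # MyTrack1  (the _ is stripped too)
print(sanitize("My Track 1!", strict=False))  # My_Track_1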
mattja/nsim
nsim/analyses1/pyeeg.py
https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L736-L834
def permutation_entropy(x, n, tau): """Compute Permutation Entropy of a given time series x, specified by permutation order n and embedding lag tau. Parameters ---------- x list a time series n integer Permutation order tau integer Embedding lag Returns ---------- PE float permutation entropy Notes ---------- Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)]. We first build embedding matrix Em, of dimension(n*N-n+1), such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence the embedding lag and the embedding dimension are 1 and n respectively. We build this matrix from a given time series, X, by calling pyEEg function embed_seq(x,1,n). We then transform each row of the embedding matrix into a new sequence, comprising a set of integers in range of 0,..,n-1. The order in which the integers are placed within a row is the same as those of the original elements:0 is placed where the smallest element of the row was and n-1 replaces the largest element of the row. To calculate the Permutation entropy, we calculate the entropy of PeSeq. In doing so, we count the number of occurrences of each permutation in PeSeq and write it in a sequence, RankMat. We then use this sequence to calculate entropy by using Shannon's entropy formula. Permutation entropy is usually calculated with n in range of 3 and 7. References ---------- Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural complexity measure for time series." Physical Review Letters 88.17 (2002): 174102. Examples ---------- >>> import pyeeg >>> x = [1,2,4,5,12,3,4,5] >>> pyeeg.permutation_entropy(x,5,1) 2.0 """ PeSeq = [] Em = embed_seq(x, tau, n) for i in range(0, len(Em)): r = [] z = [] for j in range(0, len(Em[i])): z.append(Em[i][j]) for j in range(0, len(Em[i])): z.sort() r.append(z.index(Em[i][j])) z[z.index(Em[i][j])] = -1 PeSeq.append(r) RankMat = [] while len(PeSeq) > 0: RankMat.append(PeSeq.count(PeSeq[0])) x = PeSeq[0] for j in range(0, PeSeq.count(PeSeq[0])): PeSeq.pop(PeSeq.index(x)) RankMat = numpy.array(RankMat) RankMat = numpy.true_divide(RankMat, RankMat.sum()) EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat) PE = -1 * EntropyMat.sum() return PE
[ "def", "permutation_entropy", "(", "x", ",", "n", ",", "tau", ")", ":", "PeSeq", "=", "[", "]", "Em", "=", "embed_seq", "(", "x", ",", "tau", ",", "n", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "Em", ")", ")", ":", "r", "=", "[", "]", "z", "=", "[", "]", "for", "j", "in", "range", "(", "0", ",", "len", "(", "Em", "[", "i", "]", ")", ")", ":", "z", ".", "append", "(", "Em", "[", "i", "]", "[", "j", "]", ")", "for", "j", "in", "range", "(", "0", ",", "len", "(", "Em", "[", "i", "]", ")", ")", ":", "z", ".", "sort", "(", ")", "r", ".", "append", "(", "z", ".", "index", "(", "Em", "[", "i", "]", "[", "j", "]", ")", ")", "z", "[", "z", ".", "index", "(", "Em", "[", "i", "]", "[", "j", "]", ")", "]", "=", "-", "1", "PeSeq", ".", "append", "(", "r", ")", "RankMat", "=", "[", "]", "while", "len", "(", "PeSeq", ")", ">", "0", ":", "RankMat", ".", "append", "(", "PeSeq", ".", "count", "(", "PeSeq", "[", "0", "]", ")", ")", "x", "=", "PeSeq", "[", "0", "]", "for", "j", "in", "range", "(", "0", ",", "PeSeq", ".", "count", "(", "PeSeq", "[", "0", "]", ")", ")", ":", "PeSeq", ".", "pop", "(", "PeSeq", ".", "index", "(", "x", ")", ")", "RankMat", "=", "numpy", ".", "array", "(", "RankMat", ")", "RankMat", "=", "numpy", ".", "true_divide", "(", "RankMat", ",", "RankMat", ".", "sum", "(", ")", ")", "EntropyMat", "=", "numpy", ".", "multiply", "(", "numpy", ".", "log2", "(", "RankMat", ")", ",", "RankMat", ")", "PE", "=", "-", "1", "*", "EntropyMat", ".", "sum", "(", ")", "return", "PE" ]
Compute Permutation Entropy of a given time series x, specified by permutation order n and embedding lag tau. Parameters ---------- x list a time series n integer Permutation order tau integer Embedding lag Returns ---------- PE float permutation entropy Notes ---------- Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)]. We first build embedding matrix Em, of dimension(n*N-n+1), such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence the embedding lag and the embedding dimension are 1 and n respectively. We build this matrix from a given time series, X, by calling pyEEg function embed_seq(x,1,n). We then transform each row of the embedding matrix into a new sequence, comprising a set of integers in range of 0,..,n-1. The order in which the integers are placed within a row is the same as those of the original elements:0 is placed where the smallest element of the row was and n-1 replaces the largest element of the row. To calculate the Permutation entropy, we calculate the entropy of PeSeq. In doing so, we count the number of occurrences of each permutation in PeSeq and write it in a sequence, RankMat. We then use this sequence to calculate entropy by using Shannon's entropy formula. Permutation entropy is usually calculated with n in range of 3 and 7. References ---------- Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural complexity measure for time series." Physical Review Letters 88.17 (2002): 174102. Examples ---------- >>> import pyeeg >>> x = [1,2,4,5,12,3,4,5] >>> pyeeg.permutation_entropy(x,5,1) 2.0
[ "Compute", "Permutation", "Entropy", "of", "a", "given", "time", "series", "x", "specified", "by", "permutation", "order", "n", "and", "embedding", "lag", "tau", "." ]
python
train
25.191919
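Editor's note: an independent NumPy sketch of the same rank-pattern idea. The argsort-of-argsort trick yields each window's rank vector, and a stable inner sort breaks ties by first occurrence, mirroring the z.index() behavior above; this is an illustrative reimplementation, not the pyeeg code.

import numpy as np
from collections import Counter

def perm_entropy(x, n, tau):
    x = np.asarray(x)
    span = (n - 1) * tau
    # one window per embedding position; ranks via argsort of argsort
    patterns = [
        tuple(np.argsort(np.argsort(x[i:i + span + 1:tau], kind="stable")))
        for i in range(len(x) - span)
    ]
    counts = np.array(list(Counter(patterns).values()), dtype=float)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

print(perm_entropy([1, 2, 4, 5, 12, 3, 4, 5], 5, 1))  # 2.0, as in the docstring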
sbarham/dsrt
dsrt/data/SampleSet.py
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/dsrt/data/SampleSet.py#L42-L46
def load_sampleset(self, f, name):
        '''Read the sampleset from f using the HDF5 format. Name is usually in {train, test}.'''
        self.encoder_x = np.array(f[name + '_encoder_x'])
        self.decoder_x = np.array(f[name + '_decoder_x'])
        self.decoder_y = np.array(f[name + '_decoder_y'])
[ "def", "load_sampleset", "(", "self", ",", "f", ",", "name", ")", ":", "self", ".", "encoder_x", "=", "np", ".", "array", "(", "f", "[", "name", "+", "'_encoder_x'", "]", ")", "self", ".", "decoder_x", "=", "np", ".", "array", "(", "f", "[", "name", "+", "'_decoder_x'", "]", ")", "self", ".", "decoder_y", "=", "np", ".", "array", "(", "f", "[", "name", "+", "'_decoder_y'", "]", ")" ]
Read the sampleset from f using the HDF5 format. Name is usually in {train, test}.
[ "Read", "the", "sampleset", "from", "using", "the", "HDF5", "format", ".", "Name", "is", "usually", "in", "{", "train", "test", "}", "." ]
python
train
59.8
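Editor's note: a small h5py round trip showing the '<name>_encoder_x' naming scheme the loader assumes; the file name and array contents are dummies, and h5py must be installed for this to run.

import h5py
import numpy as np

with h5py.File("samples.h5", "w") as f:
    f["train_encoder_x"] = np.zeros((2, 3), dtype="i4")
    f["train_decoder_x"] = np.ones((2, 3), dtype="i4")
    f["train_decoder_y"] = np.ones((2, 3), dtype="i4")

with h5py.File("samples.h5", "r") as f:
    encoder_x = np.array(f["train" + "_encoder_x"])  # np.array() copies it out
print(encoder_x.shape)  # (2, 3)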
senaite/senaite.core
bika/lims/content/analysisrequest.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisrequest.py#L2289-L2296
def setParentAnalysisRequest(self, value): """Sets a parent analysis request, making the current a partition """ self.Schema().getField("ParentAnalysisRequest").set(self, value) if not value: noLongerProvides(self, IAnalysisRequestPartition) else: alsoProvides(self, IAnalysisRequestPartition)
[ "def", "setParentAnalysisRequest", "(", "self", ",", "value", ")", ":", "self", ".", "Schema", "(", ")", ".", "getField", "(", "\"ParentAnalysisRequest\"", ")", ".", "set", "(", "self", ",", "value", ")", "if", "not", "value", ":", "noLongerProvides", "(", "self", ",", "IAnalysisRequestPartition", ")", "else", ":", "alsoProvides", "(", "self", ",", "IAnalysisRequestPartition", ")" ]
Sets a parent analysis request, making the current a partition
[ "Sets", "a", "parent", "analysis", "request", "making", "the", "current", "a", "partition" ]
python
train
43.75
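Editor's note: a stand-alone demo of toggling a marker interface with zope.interface, the mechanism the setter above uses; the interface and class names are invented for illustration.

from zope.interface import Interface, alsoProvides, noLongerProvides

class IPartition(Interface):
    """Marker interface for partition objects."""

class Sample(object):
    pass

obj = Sample()
alsoProvides(obj, IPartition)        # attach the marker to this instance only
print(IPartition.providedBy(obj))    # True
noLongerProvides(obj, IPartition)    # detach it again
print(IPartition.providedBy(obj))    # False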
appknox/google-chartwrapper
GChartWrapper/GChart.py
https://github.com/appknox/google-chartwrapper/blob/3769aecbef6c83b6cd93ee72ece478ffe433ac57/GChartWrapper/GChart.py#L343-L360
def fill(self, *args): """ Apply a solid fill to your chart args are of the form <fill type>,<fill style>,... fill type must be one of c,bg,a fill style must be one of s,lg,ls the rest of the args refer to the particular style APIPARAM: chf """ a,b = args[:2] assert a in ('c','bg','a'), 'Fill type must be bg/c/a not %s'%a assert b in ('s','lg','ls'), 'Fill style must be s/lg/ls not %s'%b if len(args) == 3: args = color_args(args, 2) else: args = color_args(args, 3,5) self.fills.append(','.join(map(str,args))) return self
[ "def", "fill", "(", "self", ",", "*", "args", ")", ":", "a", ",", "b", "=", "args", "[", ":", "2", "]", "assert", "a", "in", "(", "'c'", ",", "'bg'", ",", "'a'", ")", ",", "'Fill type must be bg/c/a not %s'", "%", "a", "assert", "b", "in", "(", "'s'", ",", "'lg'", ",", "'ls'", ")", ",", "'Fill style must be s/lg/ls not %s'", "%", "b", "if", "len", "(", "args", ")", "==", "3", ":", "args", "=", "color_args", "(", "args", ",", "2", ")", "else", ":", "args", "=", "color_args", "(", "args", ",", "3", ",", "5", ")", "self", ".", "fills", ".", "append", "(", "','", ".", "join", "(", "map", "(", "str", ",", "args", ")", ")", ")", "return", "self" ]
Apply a solid fill to your chart args are of the form <fill type>,<fill style>,... fill type must be one of c,bg,a fill style must be one of s,lg,ls the rest of the args refer to the particular style APIPARAM: chf
[ "Apply", "a", "solid", "fill", "to", "your", "chart", "args", "are", "of", "the", "form", "<fill", "type", ">", "<fill", "style", ">", "...", "fill", "type", "must", "be", "one", "of", "c", "bg", "a", "fill", "style", "must", "be", "one", "of", "s", "lg", "ls", "the", "rest", "of", "the", "args", "refer", "to", "the", "particular", "style", "APIPARAM", ":", "chf" ]
python
test
36.277778
PlaidWeb/Publ
publ/image/image.py
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/image/image.py#L36-L55
def get_img_attrs(self, style=None, **kwargs):
        """ Get an attribute list (src, srcset, style, et al) for the image.

        style -- an optional list of CSS style fragments

        Returns: a dict of attributes e.g.
        {'src':'foo.jpg','srcset':'foo.jpg 1x, bar.jpg 2x'}
        """
        add = {}
        if 'prefix' in kwargs:
            attr_prefixes = kwargs.get('prefix')
            if isinstance(kwargs['prefix'], str):
                attr_prefixes = [attr_prefixes]
            for prefix in attr_prefixes:
                for k, val in kwargs.items():
                    if k.startswith(prefix):
                        add[k[len(prefix):]] = val

        return self._get_img_attrs(style, {**kwargs, **add})
[ "def", "get_img_attrs", "(", "self", ",", "style", "=", "None", ",", "*", "*", "kwargs", ")", ":", "add", "=", "{", "}", "if", "'prefix'", "in", "kwargs", ":", "attr_prefixes", "=", "kwargs", ".", "get", "(", "'prefix'", ")", "if", "isinstance", "(", "kwargs", "[", "'prefix'", "]", ",", "str", ")", ":", "attr_prefixes", "=", "[", "attr_prefixes", "]", "for", "prefix", "in", "attr_prefixes", ":", "for", "k", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", ".", "startswith", "(", "prefix", ")", ":", "add", "[", "k", "[", "len", "(", "prefix", ")", ":", "]", "]", "=", "val", "return", "self", ".", "_get_img_attrs", "(", "style", ",", "{", "*", "*", "kwargs", ",", "*", "*", "add", "}", ")" ]
Get an attribute list (src, srcset, style, et al) for the image.

style -- an optional list of CSS style fragments

Returns: a dict of attributes e.g.
{'src':'foo.jpg','srcset':'foo.jpg 1x, bar.jpg 2x'}
[ "Get", "an", "attribute", "list", "(", "src", "srcset", "style", "et", "al", ")", "for", "the", "image", "." ]
python
train
35.6
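Editor's note: a pure-Python illustration of the prefix-stripping merge above: any kwarg that starts with a requested prefix is duplicated without the prefix, and the stripped copies win on conflict. The names are illustrative.

def strip_prefixes(prefixes, **kwargs):
    add = {}
    for prefix in prefixes:
        for k, val in kwargs.items():
            if k.startswith(prefix):
                add[k[len(prefix):]] = val
    return {**kwargs, **add}  # stripped keys are merged in last

out = strip_prefixes(["img_"], img_width=320, title="demo")
print(out)  # {'img_width': 320, 'title': 'demo', 'width': 320}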
flo-compbio/genometools
genometools/basic/gene_set_collection.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/basic/gene_set_collection.py#L159-L180
def get_by_index(self, i): """Look up a gene set by its index. Parameters ---------- i: int The index of the gene set. Returns ------- GeneSet The gene set. Raises ------ ValueError If the given index is out of bounds. """ if i >= self.n: raise ValueError('Index %d out of bounds ' % i + 'for database with %d gene sets.' % self.n) return self._gene_sets[self._gene_set_ids[i]]
[ "def", "get_by_index", "(", "self", ",", "i", ")", ":", "if", "i", ">=", "self", ".", "n", ":", "raise", "ValueError", "(", "'Index %d out of bounds '", "%", "i", "+", "'for database with %d gene sets.'", "%", "self", ".", "n", ")", "return", "self", ".", "_gene_sets", "[", "self", ".", "_gene_set_ids", "[", "i", "]", "]" ]
Look up a gene set by its index. Parameters ---------- i: int The index of the gene set. Returns ------- GeneSet The gene set. Raises ------ ValueError If the given index is out of bounds.
[ "Look", "up", "a", "gene", "set", "by", "its", "index", "." ]
python
train
24.545455
etingof/pysnmp
pysnmp/smi/rfc1902.py
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/rfc1902.py#L158-L193
def getLabel(self):
        """Returns symbolic path to this MIB variable.

        Meaning a sequence of symbolic identifications for each of parent
        MIB objects in MIB tree.

        Returns
        -------
        tuple
            sequence of names of nodes in a MIB tree from the top of the
            tree towards this MIB variable.

        Raises
        ------
        SmiError
            If MIB variable conversion has not been performed.

        Notes
        -----
        Returned sequence may not contain full path to this MIB variable
        if some symbols are not known at the moment of MIB look up.

        Examples
        --------
        >>> objectIdentity = ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)
        >>> objectIdentity.resolveWithMib(mibViewController)
        >>> objectIdentity.getOid()
        ('iso', 'org', 'dod', 'internet', 'mgmt', 'mib-2', 'system', 'sysDescr')
        >>>

        """
        if self._state & self.ST_CLEAN:
            return self._label

        else:
            raise SmiError(
                '%s object not fully initialized' % self.__class__.__name__)
[ "def", "getLabel", "(", "self", ")", ":", "if", "self", ".", "_state", "&", "self", ".", "ST_CLEAN", ":", "return", "self", ".", "_label", "else", ":", "raise", "SmiError", "(", "'%s object not fully initialized'", "%", "self", ".", "__class__", ".", "__name__", ")" ]
Returns symbolic path to this MIB variable.

Meaning a sequence of symbolic identifications for each of parent
MIB objects in MIB tree.

Returns
-------
tuple
    sequence of names of nodes in a MIB tree from the top of the tree
    towards this MIB variable.

Raises
------
SmiError
    If MIB variable conversion has not been performed.

Notes
-----
Returned sequence may not contain full path to this MIB variable
if some symbols are not known at the moment of MIB look up.

Examples
--------
>>> objectIdentity = ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getOid()
('iso', 'org', 'dod', 'internet', 'mgmt', 'mib-2', 'system', 'sysDescr')
>>>
[ "Returns", "symbolic", "path", "to", "this", "MIB", "variable", "." ]
python
train
30.333333
PyCQA/pylint
pylint/checkers/base.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/base.py#L997-L1001
def open(self): """initialize visit variables and statistics """ self._tryfinallys = [] self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
[ "def", "open", "(", "self", ")", ":", "self", ".", "_tryfinallys", "=", "[", "]", "self", ".", "stats", "=", "self", ".", "linter", ".", "add_stats", "(", "module", "=", "0", ",", "function", "=", "0", ",", "method", "=", "0", ",", "class_", "=", "0", ")" ]
initialize visit variables and statistics
[ "initialize", "visit", "variables", "and", "statistics" ]
python
test
38.4
google/openhtf
openhtf/plugs/usb/filesync_service.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/filesync_service.py#L205-L227
def _check_for_fail_message(self, transport, exc_info, timeout): # pylint: disable=no-self-use """Check for a 'FAIL' message from transport. This method always raises, if 'FAIL' was read, it will raise an AdbRemoteError with the message, otherwise it will raise based on exc_info, which should be a tuple as per sys.exc_info(). Args: transport: Transport from which to read for a 'FAIL' message. exc_info: Exception info to raise if no 'FAIL' is read. timeout: Timeout to use for the read operation. Raises: AdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info. """ try: transport.read_message(timeout) except usb_exceptions.CommonUsbError: # If we got a remote error, raise that exception. if sys.exc_info()[0] is usb_exceptions.AdbRemoteError: raise # Otherwise reraise the original exception. raise_with_traceback(exc_info[0](exc_info[1]), traceback=exc_info[2])
[ "def", "_check_for_fail_message", "(", "self", ",", "transport", ",", "exc_info", ",", "timeout", ")", ":", "# pylint: disable=no-self-use", "try", ":", "transport", ".", "read_message", "(", "timeout", ")", "except", "usb_exceptions", ".", "CommonUsbError", ":", "# If we got a remote error, raise that exception.", "if", "sys", ".", "exc_info", "(", ")", "[", "0", "]", "is", "usb_exceptions", ".", "AdbRemoteError", ":", "raise", "# Otherwise reraise the original exception.", "raise_with_traceback", "(", "exc_info", "[", "0", "]", "(", "exc_info", "[", "1", "]", ")", ",", "traceback", "=", "exc_info", "[", "2", "]", ")" ]
Check for a 'FAIL' message from transport. This method always raises, if 'FAIL' was read, it will raise an AdbRemoteError with the message, otherwise it will raise based on exc_info, which should be a tuple as per sys.exc_info(). Args: transport: Transport from which to read for a 'FAIL' message. exc_info: Exception info to raise if no 'FAIL' is read. timeout: Timeout to use for the read operation. Raises: AdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info.
[ "Check", "for", "a", "FAIL", "message", "from", "transport", "." ]
python
train
41.347826
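Editor's note: raise_with_traceback above is a Python 2/3 compatibility helper (the 'future' package ships one, for example). On Python 3 alone the same effect, re-raising a stored exception with its original traceback, is spelled with .with_traceback(); a minimal sketch:

import sys

def read_message():
    raise OSError("remote FAIL")

try:
    read_message()
except OSError:
    exc_info = sys.exc_info()  # (type, value, traceback)

try:
    # re-raise later, with the original traceback attached
    raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
except OSError as err:
    print(err)  # remote FAIL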
broadinstitute/fiss
firecloud/fccore.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fccore.py#L122-L183
def config_parse(files=None, config=None, config_profile=".fissconfig", **kwargs): ''' Read initial configuration state, from named config files; store this state within a config dictionary (which may be nested) whose keys may also be referenced as attributes (safely, defaulting to None if unset). A config object may be passed in, as a way of accumulating or overwriting configuration state; if one is NOT passed, the default config obj is used ''' local_config = config config = __fcconfig cfgparser = configparser.SafeConfigParser() filenames = list() # Give personal/user followed by current working directory configuration the first say filenames.append(os.path.join(os.path.expanduser('~'), config_profile)) filenames.append(os.path.join(os.getcwd(), config_profile)) if files: if isinstance(files, string_types): filenames.append(files) elif isinstance(files, Iterable): for f in files: if isinstance(f, IOBase): f = f.name filenames.append(f) cfgparser.read(filenames) # [DEFAULT] defines common variables for interpolation/substitution in # other sections, and are stored at the root level of the config object for keyval in cfgparser.items('DEFAULT'): #print("config_parse: adding config variable %s=%s" % (keyval[0], str(keyval[1]))) __fcconfig[keyval[0]] = keyval[1] for section in cfgparser.sections(): config[section] = attrdict() for option in cfgparser.options(section): # DEFAULT vars ALSO behave as though they were defined in every # section, but we purposely skip them here so that each section # reflects only the options explicitly defined in that section if not config[option]: config[section][option] = cfgparser.get(section, option) config.verbosity = int(config.verbosity) if not config.root_url.endswith('/'): config.root_url += '/' if os.path.isfile(config.credentials): os.environ[environment_vars.CREDENTIALS] = config.credentials # if local_config override options with passed options if local_config is not None: for key, value in local_config.items(): config[key] = value # if any explict config options are passed override. for key, value in kwargs.items(): config[key] = value return config
[ "def", "config_parse", "(", "files", "=", "None", ",", "config", "=", "None", ",", "config_profile", "=", "\".fissconfig\"", ",", "*", "*", "kwargs", ")", ":", "local_config", "=", "config", "config", "=", "__fcconfig", "cfgparser", "=", "configparser", ".", "SafeConfigParser", "(", ")", "filenames", "=", "list", "(", ")", "# Give personal/user followed by current working directory configuration the first say", "filenames", ".", "append", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "config_profile", ")", ")", "filenames", ".", "append", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "config_profile", ")", ")", "if", "files", ":", "if", "isinstance", "(", "files", ",", "string_types", ")", ":", "filenames", ".", "append", "(", "files", ")", "elif", "isinstance", "(", "files", ",", "Iterable", ")", ":", "for", "f", "in", "files", ":", "if", "isinstance", "(", "f", ",", "IOBase", ")", ":", "f", "=", "f", ".", "name", "filenames", ".", "append", "(", "f", ")", "cfgparser", ".", "read", "(", "filenames", ")", "# [DEFAULT] defines common variables for interpolation/substitution in", "# other sections, and are stored at the root level of the config object", "for", "keyval", "in", "cfgparser", ".", "items", "(", "'DEFAULT'", ")", ":", "#print(\"config_parse: adding config variable %s=%s\" % (keyval[0], str(keyval[1])))", "__fcconfig", "[", "keyval", "[", "0", "]", "]", "=", "keyval", "[", "1", "]", "for", "section", "in", "cfgparser", ".", "sections", "(", ")", ":", "config", "[", "section", "]", "=", "attrdict", "(", ")", "for", "option", "in", "cfgparser", ".", "options", "(", "section", ")", ":", "# DEFAULT vars ALSO behave as though they were defined in every", "# section, but we purposely skip them here so that each section", "# reflects only the options explicitly defined in that section", "if", "not", "config", "[", "option", "]", ":", "config", "[", "section", "]", "[", "option", "]", "=", "cfgparser", ".", "get", "(", "section", ",", "option", ")", "config", ".", "verbosity", "=", "int", "(", "config", ".", "verbosity", ")", "if", "not", "config", ".", "root_url", ".", "endswith", "(", "'/'", ")", ":", "config", ".", "root_url", "+=", "'/'", "if", "os", ".", "path", ".", "isfile", "(", "config", ".", "credentials", ")", ":", "os", ".", "environ", "[", "environment_vars", ".", "CREDENTIALS", "]", "=", "config", ".", "credentials", "# if local_config override options with passed options", "if", "local_config", "is", "not", "None", ":", "for", "key", ",", "value", "in", "local_config", ".", "items", "(", ")", ":", "config", "[", "key", "]", "=", "value", "# if any explict config options are passed override.", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "config", "[", "key", "]", "=", "value", "return", "config" ]
Read initial configuration state, from named config files; store this state within a config dictionary (which may be nested) whose keys may also be referenced as attributes (safely, defaulting to None if unset). A config object may be passed in, as a way of accumulating or overwriting configuration state; if one is NOT passed, the default config obj is used
[ "Read", "initial", "configuration", "state", "from", "named", "config", "files", ";", "store", "this", "state", "within", "a", "config", "dictionary", "(", "which", "may", "be", "nested", ")", "whose", "keys", "may", "also", "be", "referenced", "as", "attributes", "(", "safely", "defaulting", "to", "None", "if", "unset", ")", ".", "A", "config", "object", "may", "be", "passed", "in", "as", "a", "way", "of", "accumulating", "or", "overwriting", "configuration", "state", ";", "if", "one", "is", "NOT", "passed", "the", "default", "config", "obj", "is", "used" ]
python
train
39.064516
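Editor's note: the parser above stores [DEFAULT] keys at the config root and skips them per section, because configparser makes DEFAULT values interpolate into, and be visible from, every section. A small demo of that stdlib behavior; the values are invented.

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[DEFAULT]
root_url = https://api.firecloud.org/
verbosity = 0

[alerts]
endpoint = %(root_url)salerts
""")

print(cfg.get("alerts", "endpoint"))   # https://api.firecloud.org/alerts
print(cfg.get("alerts", "verbosity"))  # 0  (inherited from DEFAULT)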
cloudant/python-cloudant
src/cloudant/database.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L242-L260
def get_design_document(self, ddoc_id): """ Retrieves a design document. If a design document exists remotely then that content is wrapped in a DesignDocument object and returned to the caller. Otherwise a "shell" DesignDocument object is returned. :param str ddoc_id: Design document id :returns: A DesignDocument instance, if exists remotely then it will be populated accordingly """ ddoc = DesignDocument(self, ddoc_id) try: ddoc.fetch() except HTTPError as error: if error.response.status_code != 404: raise return ddoc
[ "def", "get_design_document", "(", "self", ",", "ddoc_id", ")", ":", "ddoc", "=", "DesignDocument", "(", "self", ",", "ddoc_id", ")", "try", ":", "ddoc", ".", "fetch", "(", ")", "except", "HTTPError", "as", "error", ":", "if", "error", ".", "response", ".", "status_code", "!=", "404", ":", "raise", "return", "ddoc" ]
Retrieves a design document. If a design document exists remotely then that content is wrapped in a DesignDocument object and returned to the caller. Otherwise a "shell" DesignDocument object is returned. :param str ddoc_id: Design document id :returns: A DesignDocument instance, if exists remotely then it will be populated accordingly
[ "Retrieves", "a", "design", "document", ".", "If", "a", "design", "document", "exists", "remotely", "then", "that", "content", "is", "wrapped", "in", "a", "DesignDocument", "object", "and", "returned", "to", "the", "caller", ".", "Otherwise", "a", "shell", "DesignDocument", "object", "is", "returned", "." ]
python
train
34.210526
what-studio/profiling
profiling/remote/__init__.py
https://github.com/what-studio/profiling/blob/49666ba3ea295eb73782ae6c18a4ec7929d7d8b7/profiling/remote/__init__.py#L163-L197
def profiling(self): """A generator which profiles then broadcasts the result. Implement sleeping loop using this:: def profile_periodically(self): for __ in self.profiling(): time.sleep(self.interval) """ self._log_profiler_started() while self.clients: try: self.profiler.start() except RuntimeError: pass # should sleep. yield self.profiler.stop() result = self.profiler.result() data = pack_msg(RESULT, result, pickle_protocol=self.pickle_protocol) self._latest_result_data = data # broadcast. closed_clients = [] for client in self.clients: try: self._send(client, data) except socket.error as exc: if exc.errno == EPIPE: closed_clients.append(client) del data # handle disconnections. for client in closed_clients: self.disconnected(client) self._log_profiler_stopped()
[ "def", "profiling", "(", "self", ")", ":", "self", ".", "_log_profiler_started", "(", ")", "while", "self", ".", "clients", ":", "try", ":", "self", ".", "profiler", ".", "start", "(", ")", "except", "RuntimeError", ":", "pass", "# should sleep.", "yield", "self", ".", "profiler", ".", "stop", "(", ")", "result", "=", "self", ".", "profiler", ".", "result", "(", ")", "data", "=", "pack_msg", "(", "RESULT", ",", "result", ",", "pickle_protocol", "=", "self", ".", "pickle_protocol", ")", "self", ".", "_latest_result_data", "=", "data", "# broadcast.", "closed_clients", "=", "[", "]", "for", "client", "in", "self", ".", "clients", ":", "try", ":", "self", ".", "_send", "(", "client", ",", "data", ")", "except", "socket", ".", "error", "as", "exc", ":", "if", "exc", ".", "errno", "==", "EPIPE", ":", "closed_clients", ".", "append", "(", "client", ")", "del", "data", "# handle disconnections.", "for", "client", "in", "closed_clients", ":", "self", ".", "disconnected", "(", "client", ")", "self", ".", "_log_profiler_stopped", "(", ")" ]
A generator which profiles then broadcasts the result. Implement sleeping loop using this:: def profile_periodically(self): for __ in self.profiling(): time.sleep(self.interval)
[ "A", "generator", "which", "profiles", "then", "broadcasts", "the", "result", ".", "Implement", "sleeping", "loop", "using", "this", "::" ]
python
train
33.628571
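Editor's note: the docstring describes a generator-plus-sleep split, where the generator does the work and the caller owns the pacing. A skeleton of that pattern, with the profiling faked by a counter and all names invented:

import time

class Server:
    def __init__(self):
        self.clients = ["a", "b"]
        self.results = []

    def profiling(self):
        while self.clients:
            # ... start profiler here ...
            yield  # the caller decides how long to sleep between rounds
            self.results.append("result")
            self.clients.pop()  # simulate clients disconnecting

server = Server()
for _ in server.profiling():
    time.sleep(0.01)  # the "interval" lives in the caller, not the generator
print(server.results)  # ['result', 'result']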
Peter-Slump/django-dynamic-fixtures
src/dynamic_fixtures/fixtures/runner.py
https://github.com/Peter-Slump/django-dynamic-fixtures/blob/da99b4b12b11be28ea4b36b6cf2896ca449c73c1/src/dynamic_fixtures/fixtures/runner.py#L115-L128
def get_plan(self, nodes=None): """ Retrieve a plan, e.g. a list of fixtures to be loaded sorted on dependency. :param list nodes: list of nodes to be loaded. :return: """ if nodes: plan = self.graph.resolve_nodes(nodes) else: plan = self.graph.resolve_node() return plan
[ "def", "get_plan", "(", "self", ",", "nodes", "=", "None", ")", ":", "if", "nodes", ":", "plan", "=", "self", ".", "graph", ".", "resolve_nodes", "(", "nodes", ")", "else", ":", "plan", "=", "self", ".", "graph", ".", "resolve_node", "(", ")", "return", "plan" ]
Retrieve a plan, e.g. a list of fixtures to be loaded sorted on dependency. :param list nodes: list of nodes to be loaded. :return:
[ "Retrieve", "a", "plan", "e", ".", "g", ".", "a", "list", "of", "fixtures", "to", "be", "loaded", "sorted", "on", "dependency", "." ]
python
train
25.428571
googlefonts/glyphsLib
Lib/glyphsLib/builder/paths.py
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/paths.py#L21-L49
def to_ufo_paths(self, ufo_glyph, layer): """Draw .glyphs paths onto a pen.""" pen = ufo_glyph.getPointPen() for path in layer.paths: # the list is changed below, otherwise you can't draw more than once # per session. nodes = list(path.nodes) for node in nodes: self.to_ufo_node_user_data(ufo_glyph, node) pen.beginPath() if not nodes: pen.endPath() continue if not path.closed: node = nodes.pop(0) assert node.type == "line", "Open path starts with off-curve points" pen.addPoint(tuple(node.position), segmentType="move") else: # In Glyphs.app, the starting node of a closed contour is always # stored at the end of the nodes list. nodes.insert(0, nodes.pop()) for node in nodes: node_type = _to_ufo_node_type(node.type) pen.addPoint( tuple(node.position), segmentType=node_type, smooth=node.smooth ) pen.endPath()
[ "def", "to_ufo_paths", "(", "self", ",", "ufo_glyph", ",", "layer", ")", ":", "pen", "=", "ufo_glyph", ".", "getPointPen", "(", ")", "for", "path", "in", "layer", ".", "paths", ":", "# the list is changed below, otherwise you can't draw more than once", "# per session.", "nodes", "=", "list", "(", "path", ".", "nodes", ")", "for", "node", "in", "nodes", ":", "self", ".", "to_ufo_node_user_data", "(", "ufo_glyph", ",", "node", ")", "pen", ".", "beginPath", "(", ")", "if", "not", "nodes", ":", "pen", ".", "endPath", "(", ")", "continue", "if", "not", "path", ".", "closed", ":", "node", "=", "nodes", ".", "pop", "(", "0", ")", "assert", "node", ".", "type", "==", "\"line\"", ",", "\"Open path starts with off-curve points\"", "pen", ".", "addPoint", "(", "tuple", "(", "node", ".", "position", ")", ",", "segmentType", "=", "\"move\"", ")", "else", ":", "# In Glyphs.app, the starting node of a closed contour is always", "# stored at the end of the nodes list.", "nodes", ".", "insert", "(", "0", ",", "nodes", ".", "pop", "(", ")", ")", "for", "node", "in", "nodes", ":", "node_type", "=", "_to_ufo_node_type", "(", "node", ".", "type", ")", "pen", ".", "addPoint", "(", "tuple", "(", "node", ".", "position", ")", ",", "segmentType", "=", "node_type", ",", "smooth", "=", "node", ".", "smooth", ")", "pen", ".", "endPath", "(", ")" ]
Draw .glyphs paths onto a pen.
[ "Draw", ".", "glyphs", "paths", "onto", "a", "pen", "." ]
python
train
35.896552
Nydareld/ConfigEnv
ConfigEnv/Config.py
https://github.com/Nydareld/ConfigEnv/blob/38c13e5dd9d6c5f3dcd4c1194507a43384c31e29/ConfigEnv/Config.py#L43-L58
def get(self,path):
        """ Retrieve a config value.

        Args:
            path (String): Name of a config

        Returns:
            type: String the value of the config, or None

        """
        path = path.upper()
        if path in self._configCache:
            return self._configCache[path]
        else :
            return self._findConfig(path)
[ "def", "get", "(", "self", ",", "path", ")", ":", "path", "=", "path", ".", "upper", "(", ")", "if", "path", "in", "self", ".", "_configCache", ":", "return", "self", ".", "_configCache", "[", "path", "]", "else", ":", "return", "self", ".", "_findConfig", "(", "path", ")" ]
Retrieve a config value.

Args:
    path (String): Name of a config

Returns:
    type: String the value of the config, or None
[ "permet", "de", "récupérer", "une", "config" ]
python
train
24.375
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L2901-L2924
def work_cancel(self, hash): """ Stop generating **work** for block .. enable_control required :param hash: Hash to stop generating work for :type hash: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.work_cancel( ... hash="718CC2121C3E641059BC1C2CFC45666C99E8AE922F7A807B7D07B62C995D79E2" ... ) True """ hash = self._process_value(hash, 'block') payload = {"hash": hash} resp = self.call('work_cancel', payload) return resp == {}
[ "def", "work_cancel", "(", "self", ",", "hash", ")", ":", "hash", "=", "self", ".", "_process_value", "(", "hash", ",", "'block'", ")", "payload", "=", "{", "\"hash\"", ":", "hash", "}", "resp", "=", "self", ".", "call", "(", "'work_cancel'", ",", "payload", ")", "return", "resp", "==", "{", "}" ]
Stop generating **work** for block .. enable_control required :param hash: Hash to stop generating work for :type hash: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.work_cancel( ... hash="718CC2121C3E641059BC1C2CFC45666C99E8AE922F7A807B7D07B62C995D79E2" ... ) True
[ "Stop", "generating", "**", "work", "**", "for", "block" ]
python
train
22.666667
LonamiWebs/Telethon
telethon/tl/custom/adminlogevent.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/adminlogevent.py#L95-L125
def new(self): """ The new value present in the event. """ ori = self.original.action if isinstance(ori, ( types.ChannelAdminLogEventActionChangeAbout, types.ChannelAdminLogEventActionChangeTitle, types.ChannelAdminLogEventActionChangeUsername, types.ChannelAdminLogEventActionToggleInvites, types.ChannelAdminLogEventActionTogglePreHistoryHidden, types.ChannelAdminLogEventActionToggleSignatures )): return ori.new_value elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto): return ori.new_photo elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet): return ori.new_stickerset elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage): return ori.new_message elif isinstance(ori, ( types.ChannelAdminLogEventActionParticipantToggleAdmin, types.ChannelAdminLogEventActionParticipantToggleBan )): return ori.new_participant elif isinstance(ori, types.ChannelAdminLogEventActionParticipantInvite): return ori.participant elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights): return ori.new_banned_rights elif isinstance(ori, types.ChannelAdminLogEventActionStopPoll): return ori.message
[ "def", "new", "(", "self", ")", ":", "ori", "=", "self", ".", "original", ".", "action", "if", "isinstance", "(", "ori", ",", "(", "types", ".", "ChannelAdminLogEventActionChangeAbout", ",", "types", ".", "ChannelAdminLogEventActionChangeTitle", ",", "types", ".", "ChannelAdminLogEventActionChangeUsername", ",", "types", ".", "ChannelAdminLogEventActionToggleInvites", ",", "types", ".", "ChannelAdminLogEventActionTogglePreHistoryHidden", ",", "types", ".", "ChannelAdminLogEventActionToggleSignatures", ")", ")", ":", "return", "ori", ".", "new_value", "elif", "isinstance", "(", "ori", ",", "types", ".", "ChannelAdminLogEventActionChangePhoto", ")", ":", "return", "ori", ".", "new_photo", "elif", "isinstance", "(", "ori", ",", "types", ".", "ChannelAdminLogEventActionChangeStickerSet", ")", ":", "return", "ori", ".", "new_stickerset", "elif", "isinstance", "(", "ori", ",", "types", ".", "ChannelAdminLogEventActionEditMessage", ")", ":", "return", "ori", ".", "new_message", "elif", "isinstance", "(", "ori", ",", "(", "types", ".", "ChannelAdminLogEventActionParticipantToggleAdmin", ",", "types", ".", "ChannelAdminLogEventActionParticipantToggleBan", ")", ")", ":", "return", "ori", ".", "new_participant", "elif", "isinstance", "(", "ori", ",", "types", ".", "ChannelAdminLogEventActionParticipantInvite", ")", ":", "return", "ori", ".", "participant", "elif", "isinstance", "(", "ori", ",", "types", ".", "ChannelAdminLogEventActionDefaultBannedRights", ")", ":", "return", "ori", ".", "new_banned_rights", "elif", "isinstance", "(", "ori", ",", "types", ".", "ChannelAdminLogEventActionStopPoll", ")", ":", "return", "ori", ".", "message" ]
The new value present in the event.
[ "The", "new", "value", "present", "in", "the", "event", "." ]
python
train
46.741935
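Editor's note: one common refactor for long isinstance chains like the property above is a type-to-attribute table. A hedged sketch follows; the two action classes are stand-ins, not Telethon types.

class ChangeTitle:          # stand-in for an event action class
    new_value = "new title"

class EditMessage:
    new_message = "edited text"

ATTR_BY_TYPE = {ChangeTitle: "new_value", EditMessage: "new_message"}

def new(action):
    for cls, attr in ATTR_BY_TYPE.items():
        if isinstance(action, cls):
            return getattr(action, attr)
    return None  # mirrors the implicit None of the original property

print(new(ChangeTitle()))  # new title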
CybOXProject/mixbox
mixbox/namespaces.py
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/namespaces.py#L344-L405
def add_namespace_uri(self, ns_uri, prefix=None, schema_location=None): """Adds a new namespace to this set, optionally with a prefix and schema location URI. If the namespace already exists, the given prefix and schema location are merged with the existing entry: * If non-None, ``prefix`` is added to the set. The preferred prefix is not modified. * If a schema location is not already associated with the namespace, it is set to ``schema_location`` (if given). If the namespace doesn't already exist in this set (so a new one is being created) and a prefix is given, that prefix becomes preferred. If not given, a preference as a default namespace is used. Args: ns_uri (str): The URI of the new namespace prefix (str): The desired prefix for the new namespace (optional) schema_location (str): The desired schema location for the new namespace (optional). Raises: DuplicatePrefixError: If a prefix is given which already maps to a different namespace ConflictingSchemaLocationError: If a schema location is given and the namespace already exists in this set with a different schema location. """ assert ns_uri if ns_uri in self.__ns_uri_map: # We have a _NamespaceInfo object for this URI already. So this # is a merge operation. # # We modify a copy of the real _NamespaceInfo so that we are # exception-safe: if something goes wrong, we don't end up with a # half-changed NamespaceSet. ni = self.__lookup_uri(ns_uri) new_ni = copy.deepcopy(ni) # Reconcile prefixes if prefix: self.__check_prefix_conflict(ni, prefix) new_ni.prefixes.add(prefix) self.__merge_schema_locations(new_ni, schema_location) # At this point, we have a legit new_ni object. Now we update # the set, ensuring our invariants. This should replace # all instances of the old ni in this set. for p in new_ni.prefixes: self.__prefix_map[p] = new_ni self.__ns_uri_map[new_ni.uri] = new_ni else: # A brand new namespace. The incoming prefix should not exist at # all in the prefix map. if prefix: self.__check_prefix_conflict(ns_uri, prefix) ni = _NamespaceInfo(ns_uri, prefix, schema_location) self.__add_namespaceinfo(ni)
[ "def", "add_namespace_uri", "(", "self", ",", "ns_uri", ",", "prefix", "=", "None", ",", "schema_location", "=", "None", ")", ":", "assert", "ns_uri", "if", "ns_uri", "in", "self", ".", "__ns_uri_map", ":", "# We have a _NamespaceInfo object for this URI already. So this", "# is a merge operation.", "#", "# We modify a copy of the real _NamespaceInfo so that we are", "# exception-safe: if something goes wrong, we don't end up with a", "# half-changed NamespaceSet.", "ni", "=", "self", ".", "__lookup_uri", "(", "ns_uri", ")", "new_ni", "=", "copy", ".", "deepcopy", "(", "ni", ")", "# Reconcile prefixes", "if", "prefix", ":", "self", ".", "__check_prefix_conflict", "(", "ni", ",", "prefix", ")", "new_ni", ".", "prefixes", ".", "add", "(", "prefix", ")", "self", ".", "__merge_schema_locations", "(", "new_ni", ",", "schema_location", ")", "# At this point, we have a legit new_ni object. Now we update", "# the set, ensuring our invariants. This should replace", "# all instances of the old ni in this set.", "for", "p", "in", "new_ni", ".", "prefixes", ":", "self", ".", "__prefix_map", "[", "p", "]", "=", "new_ni", "self", ".", "__ns_uri_map", "[", "new_ni", ".", "uri", "]", "=", "new_ni", "else", ":", "# A brand new namespace. The incoming prefix should not exist at", "# all in the prefix map.", "if", "prefix", ":", "self", ".", "__check_prefix_conflict", "(", "ns_uri", ",", "prefix", ")", "ni", "=", "_NamespaceInfo", "(", "ns_uri", ",", "prefix", ",", "schema_location", ")", "self", ".", "__add_namespaceinfo", "(", "ni", ")" ]
Adds a new namespace to this set, optionally with a prefix and schema location URI. If the namespace already exists, the given prefix and schema location are merged with the existing entry: * If non-None, ``prefix`` is added to the set. The preferred prefix is not modified. * If a schema location is not already associated with the namespace, it is set to ``schema_location`` (if given). If the namespace doesn't already exist in this set (so a new one is being created) and a prefix is given, that prefix becomes preferred. If not given, a preference as a default namespace is used. Args: ns_uri (str): The URI of the new namespace prefix (str): The desired prefix for the new namespace (optional) schema_location (str): The desired schema location for the new namespace (optional). Raises: DuplicatePrefixError: If a prefix is given which already maps to a different namespace ConflictingSchemaLocationError: If a schema location is given and the namespace already exists in this set with a different schema location.
[ "Adds", "a", "new", "namespace", "to", "this", "set", "optionally", "with", "a", "prefix", "and", "schema", "location", "URI", "." ]
python
train
42.725806
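Editor's note: the method above is exception-safe because it mutates a deep copy and publishes it back only after all checks pass. A distilled version of that copy-then-commit idiom, with an invented registry structure:

import copy

registry = {"uri": {"prefixes": {"ns0"}}}

def add_prefix(uri, prefix):
    new_entry = copy.deepcopy(registry[uri])  # work on a private copy
    if prefix in new_entry["prefixes"]:
        raise ValueError("duplicate prefix")  # registry untouched on failure
    new_entry["prefixes"].add(prefix)
    registry[uri] = new_entry  # the commit happens in one step at the end

add_prefix("uri", "ns1")
print(sorted(registry["uri"]["prefixes"]))  # ['ns0', 'ns1']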