nodes (stringlengths 501–22.4k) | edges (stringlengths 138–5.07k) | code (stringlengths 108–19.3k) |
---|---|---|
0, module; 1, function_definition; 2, function_name:get_dummy_dynamic_run; 3, parameters; 4, block; 5, identifier:nsamples; 6, dictionary_splat_pattern; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, if_statement; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, comment:# Seed must be False here so it is not set again for each thread; 18, expression_statement; 19, comment:# make sure the threads have unique labels and combine them; 20, for_statement; 21, expression_statement; 22, comment:# To make sure the thread labelling is same way it would when; 23, comment:# processing a dead points file, tranform into dead points; 24, expression_statement; 25, return_statement; 26, identifier:kwargs; 27, comment:"""Generate dummy data for a dynamic nested sampling run.
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the initial run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the dynamic run (starting at randomly chosen points
in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""; 28, assignment; 29, assignment; 30, assignment; 31, assignment; 32, assignment; 33, identifier:kwargs; 34, block; 35, assignment; 36, assignment; 37, assignment; 38, augmented_assignment; 39, pattern_list; 40, call; 41, block; 42, assignment; 43, assignment; 44, call; 45, identifier:seed; 46, call; 47, identifier:ndim; 48, call; 49, identifier:nthread_init; 50, call; 51, identifier:nthread_dyn; 52, call; 53, identifier:logl_range; 54, call; 55, raise_statement; 56, identifier:init; 57, call; 58, identifier:dyn_starts; 59, call; 60, identifier:threads; 61, call; 62, identifier:threads; 63, list_comprehension; 64, identifier:i; 65, identifier:_; 66, identifier:enumerate; 67, argument_list; 68, expression_statement; 69, identifier:run; 70, call; 71, identifier:samples; 72, call; 73, attribute; 74, argument_list; 75, attribute; 76, argument_list; 77, attribute; 78, argument_list; 79, attribute; 80, argument_list; 81, attribute; 82, argument_list; 83, attribute; 84, argument_list; 85, call; 86, identifier:get_dummy_run; 87, argument_list; 88, identifier:list; 89, argument_list; 90, attribute; 91, argument_list; 92, call; 93, for_in_clause; 94, identifier:threads; 95, assignment; 96, attribute; 97, argument_list; 98, attribute; 99, argument_list; 100, attribute; 101, identifier:process_samples_array; 102, identifier:samples; 103, identifier:kwargs; 104, identifier:pop; 105, string; 106, False; 107, identifier:kwargs; 108, identifier:pop; 109, string; 110, integer:2; 111, identifier:kwargs; 112, identifier:pop; 113, string; 114, integer:2; 115, identifier:kwargs; 116, identifier:pop; 117, string; 118, integer:3; 119, identifier:kwargs; 120, identifier:pop; 121, string; 122, integer:1; 123, identifier:TypeError; 124, argument_list; 125, identifier:nthread_init; 126, identifier:nsamples; 127, keyword_argument; 128, keyword_argument; 129, keyword_argument; 130, keyword_argument; 131, call; 132, attribute; 133, identifier:get_run_threads; 134, identifier:init; 135, identifier:get_dummy_thread; 136, argument_list; 137, identifier:start; 138, identifier:dyn_starts; 139, subscript; 140, call; 141, attribute; 142, identifier:combine_threads; 143, identifier:threads; 144, attribute; 145, identifier:run_dead_birth_array; 146, identifier:run; 147, identifier:nestcheck; 148, identifier:data_processing; 149, string_content:seed; 150, string_content:ndim; 151, string_content:nthread_init; 152, string_content:nthread_dyn; 153, string_content:logl_range; 154, call; 155, identifier:ndim; 156, identifier:ndim; 157, identifier:seed; 158, identifier:seed; 159, identifier:logl_start; 160, unary_operator; 161, identifier:logl_range; 162, identifier:logl_range; 163, attribute; 164, argument_list; 165, identifier:nestcheck; 166, identifier:ns_run_utils; 167, identifier:nsamples; 168, keyword_argument; 169, keyword_argument; 170, keyword_argument; 171, keyword_argument; 172, subscript; 173, string; 174, attribute; 175, argument_list; 176, identifier:nestcheck; 177, identifier:ns_run_utils; 178, identifier:nestcheck; 179, identifier:write_polychord_output; 180, attribute; 181, argument_list; 182, attribute; 183, attribute; 184, identifier:choice; 185, subscript; 186, identifier:nthread_dyn; 187, keyword_argument; 188, identifier:ndim; 189, identifier:ndim; 190, identifier:seed; 191, False; 192, identifier:logl_start; 193, identifier:start; 194, identifier:logl_range; 195, identifier:logl_range; 196, identifier:threads; 197, identifier:i; 198, string_content:thread_labels; 199, identifier:np; 200, identifier:full; 
201, identifier:nsamples; 202, identifier:i; 203, string; 204, identifier:format; 205, identifier:kwargs; 206, identifier:np; 207, identifier:inf; 208, identifier:np; 209, identifier:random; 210, identifier:init; 211, string; 212, identifier:replace; 213, True; 214, string_content:Unexpected **kwargs: {0}; 215, string_content:logl | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 6, 26; 7, 27; 8, 28; 9, 29; 10, 30; 11, 31; 12, 32; 13, 33; 13, 34; 14, 35; 15, 36; 16, 37; 18, 38; 20, 39; 20, 40; 20, 41; 21, 42; 24, 43; 25, 44; 28, 45; 28, 46; 29, 47; 29, 48; 30, 49; 30, 50; 31, 51; 31, 52; 32, 53; 32, 54; 34, 55; 35, 56; 35, 57; 36, 58; 36, 59; 37, 60; 37, 61; 38, 62; 38, 63; 39, 64; 39, 65; 40, 66; 40, 67; 41, 68; 42, 69; 42, 70; 43, 71; 43, 72; 44, 73; 44, 74; 46, 75; 46, 76; 48, 77; 48, 78; 50, 79; 50, 80; 52, 81; 52, 82; 54, 83; 54, 84; 55, 85; 57, 86; 57, 87; 59, 88; 59, 89; 61, 90; 61, 91; 63, 92; 63, 93; 67, 94; 68, 95; 70, 96; 70, 97; 72, 98; 72, 99; 73, 100; 73, 101; 74, 102; 75, 103; 75, 104; 76, 105; 76, 106; 77, 107; 77, 108; 78, 109; 78, 110; 79, 111; 79, 112; 80, 113; 80, 114; 81, 115; 81, 116; 82, 117; 82, 118; 83, 119; 83, 120; 84, 121; 84, 122; 85, 123; 85, 124; 87, 125; 87, 126; 87, 127; 87, 128; 87, 129; 87, 130; 89, 131; 90, 132; 90, 133; 91, 134; 92, 135; 92, 136; 93, 137; 93, 138; 95, 139; 95, 140; 96, 141; 96, 142; 97, 143; 98, 144; 98, 145; 99, 146; 100, 147; 100, 148; 105, 149; 109, 150; 113, 151; 117, 152; 121, 153; 124, 154; 127, 155; 127, 156; 128, 157; 128, 158; 129, 159; 129, 160; 130, 161; 130, 162; 131, 163; 131, 164; 132, 165; 132, 166; 136, 167; 136, 168; 136, 169; 136, 170; 136, 171; 139, 172; 139, 173; 140, 174; 140, 175; 141, 176; 141, 177; 144, 178; 144, 179; 154, 180; 154, 181; 160, 182; 163, 183; 163, 184; 164, 185; 164, 186; 164, 187; 168, 188; 168, 189; 169, 190; 169, 191; 170, 192; 170, 193; 171, 194; 171, 195; 172, 196; 172, 197; 173, 198; 174, 199; 174, 200; 175, 201; 175, 202; 180, 203; 180, 204; 181, 205; 182, 206; 182, 207; 183, 208; 183, 209; 185, 210; 185, 211; 187, 212; 187, 213; 203, 214; 211, 215 | def get_dummy_dynamic_run(nsamples, **kwargs):
"""Generate dummy data for a dynamic nested sampling run.
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the initial run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the dynamic run (starting at randomly chosen points
in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
nthread_init = kwargs.pop('nthread_init', 2)
nthread_dyn = kwargs.pop('nthread_dyn', 3)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
logl_start=-np.inf, logl_range=logl_range)
dyn_starts = list(np.random.choice(
init['logl'], nthread_dyn, replace=True))
threads = nestcheck.ns_run_utils.get_run_threads(init)
# Seed must be False here so it is not set again for each thread
threads += [get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=start,
logl_range=logl_range) for start in dyn_starts]
# make sure the threads have unique labels and combine them
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
run = nestcheck.ns_run_utils.combine_threads(threads)
# To make sure the thread labelling is the same as it would be when
# processing a dead points file, transform into dead points
samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
return nestcheck.data_processing.process_samples_array(samples) |
0, module; 1, function_definition; 2, function_name:get_dates_for_project; 3, parameters; 4, block; 5, identifier:self; 6, identifier:project; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, comment:"""
Return a list of the dates we have in cache for the specified project,
sorted in ascending date order.
:param project: project name
:type project: str
:return: list of datetime.datetime objects
:rtype: list of datetime.datetime
"""; 13, assignment; 14, assignment; 15, identifier:f; 16, call; 17, block; 18, call; 19, identifier:file_re; 20, call; 21, identifier:all_dates; 22, list; 23, attribute; 24, argument_list; 25, if_statement; 26, expression_statement; 27, if_statement; 28, expression_statement; 29, identifier:sorted; 30, argument_list; 31, attribute; 32, argument_list; 33, identifier:os; 34, identifier:listdir; 35, attribute; 36, not_operator; 37, block; 38, assignment; 39, comparison_operator:m is None; 40, block; 41, call; 42, identifier:all_dates; 43, identifier:re; 44, identifier:compile; 45, binary_operator:r'^%s_([0-9]{8})\.json$' % project; 46, identifier:self; 47, identifier:cache_path; 48, call; 49, continue_statement; 50, identifier:m; 51, call; 52, identifier:m; 53, None; 54, continue_statement; 55, attribute; 56, argument_list; 57, string; 58, identifier:project; 59, attribute; 60, argument_list; 61, attribute; 62, argument_list; 63, identifier:all_dates; 64, identifier:append; 65, call; 66, string_content:^%s_([0-9]{8})\.json$; 67, attribute; 68, identifier:isfile; 69, call; 70, identifier:file_re; 71, identifier:match; 72, identifier:f; 73, attribute; 74, argument_list; 75, identifier:os; 76, identifier:path; 77, attribute; 78, argument_list; 79, identifier:datetime; 80, identifier:strptime; 81, call; 82, string; 83, attribute; 84, identifier:join; 85, attribute; 86, identifier:f; 87, attribute; 88, argument_list; 89, string_content:%Y%m%d; 90, identifier:os; 91, identifier:path; 92, identifier:self; 93, identifier:cache_path; 94, identifier:m; 95, identifier:group; 96, integer:1 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 8, 13; 9, 14; 10, 15; 10, 16; 10, 17; 11, 18; 13, 19; 13, 20; 14, 21; 14, 22; 16, 23; 16, 24; 17, 25; 17, 26; 17, 27; 17, 28; 18, 29; 18, 30; 20, 31; 20, 32; 23, 33; 23, 34; 24, 35; 25, 36; 25, 37; 26, 38; 27, 39; 27, 40; 28, 41; 30, 42; 31, 43; 31, 44; 32, 45; 35, 46; 35, 47; 36, 48; 37, 49; 38, 50; 38, 51; 39, 52; 39, 53; 40, 54; 41, 55; 41, 56; 45, 57; 45, 58; 48, 59; 48, 60; 51, 61; 51, 62; 55, 63; 55, 64; 56, 65; 57, 66; 59, 67; 59, 68; 60, 69; 61, 70; 61, 71; 62, 72; 65, 73; 65, 74; 67, 75; 67, 76; 69, 77; 69, 78; 73, 79; 73, 80; 74, 81; 74, 82; 77, 83; 77, 84; 78, 85; 78, 86; 81, 87; 81, 88; 82, 89; 83, 90; 83, 91; 85, 92; 85, 93; 87, 94; 87, 95; 88, 96 | def get_dates_for_project(self, project):
"""
Return a list of the dates we have in cache for the specified project,
sorted in ascending date order.
:param project: project name
:type project: str
:return: list of datetime.datetime objects
:rtype: list of datetime.datetime
"""
file_re = re.compile(r'^%s_([0-9]{8})\.json$' % project)
all_dates = []
for f in os.listdir(self.cache_path):
if not os.path.isfile(os.path.join(self.cache_path, f)):
continue
m = file_re.match(f)
if m is None:
continue
all_dates.append(datetime.strptime(m.group(1), '%Y%m%d'))
return sorted(all_dates) |
0, module; 1, function_definition; 2, function_name:insort_event_right; 3, parameters; 4, block; 5, identifier:self; 6, identifier:event; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, if_statement; 11, if_statement; 12, while_statement; 13, expression_statement; 14, identifier:lo; 15, integer:0; 16, identifier:hi; 17, None; 18, comment:"""Insert event in queue, and keep it sorted assuming queue is sorted.
If event is already in queue, insert it to the right of the rightmost
event (to keep FIFO order).
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
Args:
event: a (time in sec since unix epoch, callback, args, kwds) tuple.
"""; 19, comparison_operator:lo < 0; 20, block; 21, comparison_operator:hi is None; 22, block; 23, comparison_operator:lo < hi; 24, block; 25, call; 26, identifier:lo; 27, integer:0; 28, raise_statement; 29, identifier:hi; 30, None; 31, expression_statement; 32, identifier:lo; 33, identifier:hi; 34, expression_statement; 35, if_statement; 36, attribute; 37, argument_list; 38, call; 39, assignment; 40, assignment; 41, comparison_operator:event[0] < self.queue[mid][0]; 42, block; 43, else_clause; 44, attribute; 45, identifier:insert; 46, identifier:lo; 47, identifier:event; 48, identifier:ValueError; 49, argument_list; 50, identifier:hi; 51, call; 52, identifier:mid; 53, binary_operator:(lo + hi) // 2; 54, subscript; 55, subscript; 56, expression_statement; 57, block; 58, identifier:self; 59, identifier:queue; 60, string; 61, identifier:len; 62, argument_list; 63, parenthesized_expression; 64, integer:2; 65, identifier:event; 66, integer:0; 67, subscript; 68, integer:0; 69, assignment; 70, expression_statement; 71, string_content:lo must be non-negative; 72, attribute; 73, binary_operator:lo + hi; 74, attribute; 75, identifier:mid; 76, identifier:hi; 77, identifier:mid; 78, assignment; 79, identifier:self; 80, identifier:queue; 81, identifier:lo; 82, identifier:hi; 83, identifier:self; 84, identifier:queue; 85, identifier:lo; 86, binary_operator:mid + 1; 87, identifier:mid; 88, integer:1 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 7, 14; 7, 15; 8, 16; 8, 17; 9, 18; 10, 19; 10, 20; 11, 21; 11, 22; 12, 23; 12, 24; 13, 25; 19, 26; 19, 27; 20, 28; 21, 29; 21, 30; 22, 31; 23, 32; 23, 33; 24, 34; 24, 35; 25, 36; 25, 37; 28, 38; 31, 39; 34, 40; 35, 41; 35, 42; 35, 43; 36, 44; 36, 45; 37, 46; 37, 47; 38, 48; 38, 49; 39, 50; 39, 51; 40, 52; 40, 53; 41, 54; 41, 55; 42, 56; 43, 57; 44, 58; 44, 59; 49, 60; 51, 61; 51, 62; 53, 63; 53, 64; 54, 65; 54, 66; 55, 67; 55, 68; 56, 69; 57, 70; 60, 71; 62, 72; 63, 73; 67, 74; 67, 75; 69, 76; 69, 77; 70, 78; 72, 79; 72, 80; 73, 81; 73, 82; 74, 83; 74, 84; 78, 85; 78, 86; 86, 87; 86, 88 | def insort_event_right(self, event, lo=0, hi=None):
"""Insert event in queue, and keep it sorted assuming queue is sorted.
If event is already in queue, insert it to the right of the rightmost
event (to keep FIFO order).
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
Args:
event: a (time in sec since unix epoch, callback, args, kwds) tuple.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(self.queue)
while lo < hi:
mid = (lo + hi) // 2
if event[0] < self.queue[mid][0]:
hi = mid
else:
lo = mid + 1
self.queue.insert(lo, event) |
0, module; 1, function_definition; 2, function_name:getMd5Checksum; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, comment:"""
Returns the MD5 checksum for this reference set. This checksum is
calculated by making a list of `Reference.md5checksum` for all
`Reference`s in this set. We then sort this list, and take the
MD5 hash of all the strings concatenated together.
"""; 12, assignment; 13, assignment; 14, assignment; 15, identifier:md5checksum; 16, identifier:references; 17, call; 18, identifier:checksums; 19, call; 20, identifier:md5checksum; 21, call; 22, identifier:sorted; 23, argument_list; 24, attribute; 25, argument_list; 26, attribute; 27, argument_list; 28, call; 29, keyword_argument; 30, string; 31, identifier:join; 32, list_comprehension; 33, call; 34, identifier:hexdigest; 35, attribute; 36, argument_list; 37, identifier:key; 38, lambda; 39, call; 40, for_in_clause; 41, attribute; 42, argument_list; 43, identifier:self; 44, identifier:getReferences; 45, lambda_parameters; 46, call; 47, attribute; 48, argument_list; 49, identifier:ref; 50, identifier:references; 51, identifier:hashlib; 52, identifier:md5; 53, identifier:checksums; 54, identifier:ref; 55, attribute; 56, argument_list; 57, identifier:ref; 58, identifier:getMd5Checksum; 59, identifier:ref; 60, identifier:getMd5Checksum | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 7, 12; 8, 13; 9, 14; 10, 15; 12, 16; 12, 17; 13, 18; 13, 19; 14, 20; 14, 21; 17, 22; 17, 23; 19, 24; 19, 25; 21, 26; 21, 27; 23, 28; 23, 29; 24, 30; 24, 31; 25, 32; 26, 33; 26, 34; 28, 35; 28, 36; 29, 37; 29, 38; 32, 39; 32, 40; 33, 41; 33, 42; 35, 43; 35, 44; 38, 45; 38, 46; 39, 47; 39, 48; 40, 49; 40, 50; 41, 51; 41, 52; 42, 53; 45, 54; 46, 55; 46, 56; 47, 57; 47, 58; 55, 59; 55, 60 | def getMd5Checksum(self):
"""
Returns the MD5 checksum for this reference set. This checksum is
calculated by making a list of `Reference.md5checksum` for all
`Reference`s in this set. We then sort this list, and take the
MD5 hash of all the strings concatenated together.
"""
references = sorted(
self.getReferences(),
key=lambda ref: ref.getMd5Checksum())
checksums = ''.join([ref.getMd5Checksum() for ref in references])
md5checksum = hashlib.md5(checksums).hexdigest()
return md5checksum |
0, module; 1, function_definition; 2, function_name:knn_impute_few_observed; 3, parameters; 4, block; 5, identifier:X; 6, identifier:missing_mask; 7, identifier:k; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, comment:# put the missing mask in column major order since it's accessed; 14, comment:# one column at a time; 15, expression_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, comment:# get rid of infinities, replace them with a very large number; 20, expression_statement; 21, expression_statement; 22, expression_statement; 23, expression_statement; 24, comment:# trim the number of other rows we consider to exclude those; 25, comment:# with infinite distances; 26, expression_statement; 27, expression_statement; 28, for_statement; 29, return_statement; 30, identifier:verbose; 31, False; 32, identifier:print_interval; 33, integer:100; 34, comment:"""
Seems to be the fastest kNN implementation. Pre-sorts each row's neighbors
and then filters these sorted indices using each column's mask of
observed values.
Important detail: If k observed values are not available then uses fewer
than k neighboring rows.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool
"""; 35, assignment; 36, assignment; 37, assignment; 38, assignment; 39, assignment; 40, assignment; 41, assignment; 42, assignment; 43, assignment; 44, assignment; 45, assignment; 46, assignment; 47, identifier:i; 48, call; 49, block; 50, identifier:X_row_major; 51, identifier:start_t; 52, call; 53, pattern_list; 54, attribute; 55, identifier:missing_mask_column_major; 56, call; 57, identifier:observed_mask_column_major; 58, unary_operator; 59, identifier:X_column_major; 60, call; 61, pattern_list; 62, line_continuation:\; 63, call; 64, identifier:D_sorted; 65, call; 66, identifier:inv_D; 67, binary_operator:1.0 / D; 68, identifier:D_valid_mask; 69, comparison_operator:D < effective_infinity; 70, identifier:valid_distances_per_row; 71, call; 72, identifier:D_sorted; 73, list_comprehension; 74, identifier:dot; 75, attribute; 76, identifier:range; 77, argument_list; 78, expression_statement; 79, expression_statement; 80, expression_statement; 81, if_statement; 82, expression_statement; 83, for_statement; 84, attribute; 85, argument_list; 86, identifier:n_rows; 87, identifier:n_cols; 88, identifier:X; 89, identifier:shape; 90, attribute; 91, argument_list; 92, identifier:missing_mask_column_major; 93, attribute; 94, argument_list; 95, identifier:X_row_major; 96, identifier:D; 97, identifier:effective_infinity; 98, identifier:knn_initialize; 99, argument_list; 100, attribute; 101, argument_list; 102, float:1.0; 103, identifier:D; 104, identifier:D; 105, identifier:effective_infinity; 106, attribute; 107, argument_list; 108, subscript; 109, for_in_clause; 110, identifier:np; 111, identifier:dot; 112, identifier:n_rows; 113, assignment; 114, assignment; 115, assignment; 116, boolean_operator; 117, block; 118, assignment; 119, identifier:j; 120, identifier:missing_indices; 121, block; 122, identifier:time; 123, identifier:time; 124, identifier:np; 125, identifier:asarray; 126, identifier:missing_mask; 127, keyword_argument; 128, identifier:X; 129, identifier:copy; 130, keyword_argument; 131, identifier:X; 132, identifier:missing_mask; 133, keyword_argument; 134, identifier:np; 135, identifier:argsort; 136, identifier:D; 137, keyword_argument; 138, identifier:D_valid_mask; 139, identifier:sum; 140, keyword_argument; 141, identifier:D_sorted; 142, identifier:i; 143, slice; 144, pattern_list; 145, call; 146, identifier:missing_row; 147, subscript; 148, identifier:missing_indices; 149, subscript; 150, identifier:row_weights; 151, subscript; 152, identifier:verbose; 153, comparison_operator:i % print_interval == 0; 154, expression_statement; 155, identifier:candidate_neighbor_indices; 156, subscript; 157, expression_statement; 158, expression_statement; 159, expression_statement; 160, expression_statement; 161, expression_statement; 162, expression_statement; 163, if_statement; 164, identifier:order; 165, string:"F"; 166, identifier:order; 167, string:"F"; 168, identifier:verbose; 169, identifier:verbose; 170, identifier:axis; 171, integer:1; 172, identifier:axis; 173, integer:1; 174, identifier:count; 175, identifier:i; 176, identifier:count; 177, identifier:enumerate; 178, argument_list; 179, identifier:missing_mask; 180, identifier:i; 181, slice; 182, call; 183, integer:0; 184, identifier:inv_D; 185, identifier:i; 186, slice; 187, binary_operator:i % print_interval; 188, integer:0; 189, call; 190, identifier:D_sorted; 191, identifier:i; 192, assignment; 193, assignment; 194, assignment; 195, assignment; 196, assignment; 197, assignment; 198, comparison_operator:weight_sum > 0; 199, block; 200, 
identifier:valid_distances_per_row; 201, attribute; 202, argument_list; 203, identifier:i; 204, identifier:print_interval; 205, identifier:print; 206, argument_list; 207, identifier:observed; 208, subscript; 209, identifier:sorted_observed; 210, subscript; 211, identifier:observed_neighbor_indices; 212, subscript; 213, identifier:k_nearest_indices; 214, subscript; 215, identifier:weights; 216, subscript; 217, identifier:weight_sum; 218, call; 219, identifier:weight_sum; 220, integer:0; 221, expression_statement; 222, expression_statement; 223, expression_statement; 224, identifier:np; 225, identifier:where; 226, identifier:missing_row; 227, binary_operator:"Imputing row %d/%d with %d missing, elapsed time: %0.3f" % (
i + 1,
n_rows,
len(missing_indices),
time.time() - start_t); 228, identifier:observed_mask_column_major; 229, slice; 230, identifier:j; 231, identifier:observed; 232, identifier:candidate_neighbor_indices; 233, identifier:candidate_neighbor_indices; 234, identifier:sorted_observed; 235, identifier:observed_neighbor_indices; 236, slice; 237, identifier:row_weights; 238, identifier:k_nearest_indices; 239, attribute; 240, argument_list; 241, assignment; 242, assignment; 243, assignment; 244, string:"Imputing row %d/%d with %d missing, elapsed time: %0.3f"; 245, tuple; 246, identifier:k; 247, identifier:weights; 248, identifier:sum; 249, identifier:column; 250, subscript; 251, identifier:values; 252, subscript; 253, subscript; 254, binary_operator:dot(values, weights) / weight_sum; 255, binary_operator:i + 1; 256, identifier:n_rows; 257, call; 258, binary_operator:time.time() - start_t; 259, identifier:X_column_major; 260, slice; 261, identifier:j; 262, identifier:column; 263, identifier:k_nearest_indices; 264, identifier:X_row_major; 265, identifier:i; 266, identifier:j; 267, call; 268, identifier:weight_sum; 269, identifier:i; 270, integer:1; 271, identifier:len; 272, argument_list; 273, call; 274, identifier:start_t; 275, identifier:dot; 276, argument_list; 277, identifier:missing_indices; 278, attribute; 279, argument_list; 280, identifier:values; 281, identifier:weights; 282, identifier:time; 283, identifier:time | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 8, 30; 8, 31; 9, 32; 9, 33; 10, 34; 11, 35; 12, 36; 15, 37; 16, 38; 17, 39; 18, 40; 20, 41; 21, 42; 22, 43; 23, 44; 26, 45; 27, 46; 28, 47; 28, 48; 28, 49; 29, 50; 35, 51; 35, 52; 36, 53; 36, 54; 37, 55; 37, 56; 38, 57; 38, 58; 39, 59; 39, 60; 40, 61; 40, 62; 40, 63; 41, 64; 41, 65; 42, 66; 42, 67; 43, 68; 43, 69; 44, 70; 44, 71; 45, 72; 45, 73; 46, 74; 46, 75; 48, 76; 48, 77; 49, 78; 49, 79; 49, 80; 49, 81; 49, 82; 49, 83; 52, 84; 52, 85; 53, 86; 53, 87; 54, 88; 54, 89; 56, 90; 56, 91; 58, 92; 60, 93; 60, 94; 61, 95; 61, 96; 61, 97; 63, 98; 63, 99; 65, 100; 65, 101; 67, 102; 67, 103; 69, 104; 69, 105; 71, 106; 71, 107; 73, 108; 73, 109; 75, 110; 75, 111; 77, 112; 78, 113; 79, 114; 80, 115; 81, 116; 81, 117; 82, 118; 83, 119; 83, 120; 83, 121; 84, 122; 84, 123; 90, 124; 90, 125; 91, 126; 91, 127; 93, 128; 93, 129; 94, 130; 99, 131; 99, 132; 99, 133; 100, 134; 100, 135; 101, 136; 101, 137; 106, 138; 106, 139; 107, 140; 108, 141; 108, 142; 108, 143; 109, 144; 109, 145; 113, 146; 113, 147; 114, 148; 114, 149; 115, 150; 115, 151; 116, 152; 116, 153; 117, 154; 118, 155; 118, 156; 121, 157; 121, 158; 121, 159; 121, 160; 121, 161; 121, 162; 121, 163; 127, 164; 127, 165; 130, 166; 130, 167; 133, 168; 133, 169; 137, 170; 137, 171; 140, 172; 140, 173; 143, 174; 144, 175; 144, 176; 145, 177; 145, 178; 147, 179; 147, 180; 147, 181; 149, 182; 149, 183; 151, 184; 151, 185; 151, 186; 153, 187; 153, 188; 154, 189; 156, 190; 156, 191; 157, 192; 158, 193; 159, 194; 160, 195; 161, 196; 162, 197; 163, 198; 163, 199; 178, 200; 182, 201; 182, 202; 187, 203; 187, 204; 189, 205; 189, 206; 192, 207; 192, 208; 193, 209; 193, 210; 194, 211; 194, 212; 195, 213; 195, 214; 196, 215; 196, 216; 197, 217; 197, 218; 198, 219; 198, 220; 199, 221; 199, 222; 199, 223; 201, 224; 201, 225; 202, 226; 206, 227; 208, 228; 208, 229; 208, 230; 210, 231; 210, 232; 212, 233; 212, 234; 214, 235; 214, 236; 216, 237; 216, 238; 218, 239; 218, 240; 221, 241; 222, 242; 223, 
243; 227, 244; 227, 245; 236, 246; 239, 247; 239, 248; 241, 249; 241, 250; 242, 251; 242, 252; 243, 253; 243, 254; 245, 255; 245, 256; 245, 257; 245, 258; 250, 259; 250, 260; 250, 261; 252, 262; 252, 263; 253, 264; 253, 265; 253, 266; 254, 267; 254, 268; 255, 269; 255, 270; 257, 271; 257, 272; 258, 273; 258, 274; 267, 275; 267, 276; 272, 277; 273, 278; 273, 279; 276, 280; 276, 281; 278, 282; 278, 283 | def knn_impute_few_observed(
X, missing_mask, k, verbose=False, print_interval=100):
"""
Seems to be the fastest kNN implementation. Pre-sorts each row's neighbors
and then filters these sorted indices using each column's mask of
observed values.
Important detail: If k observed values are not available then uses fewer
than k neighboring rows.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool
"""
start_t = time.time()
n_rows, n_cols = X.shape
# put the missing mask in column major order since it's accessed
# one column at a time
missing_mask_column_major = np.asarray(missing_mask, order="F")
observed_mask_column_major = ~missing_mask_column_major
X_column_major = X.copy(order="F")
X_row_major, D, effective_infinity = \
knn_initialize(X, missing_mask, verbose=verbose)
# get rid of infinities, replace them with a very large number
D_sorted = np.argsort(D, axis=1)
inv_D = 1.0 / D
D_valid_mask = D < effective_infinity
valid_distances_per_row = D_valid_mask.sum(axis=1)
# trim the number of other rows we consider to exclude those
# with infinite distances
D_sorted = [
D_sorted[i, :count]
for i, count in enumerate(valid_distances_per_row)
]
dot = np.dot
for i in range(n_rows):
missing_row = missing_mask[i, :]
missing_indices = np.where(missing_row)[0]
row_weights = inv_D[i, :]
if verbose and i % print_interval == 0:
print(
"Imputing row %d/%d with %d missing, elapsed time: %0.3f" % (
i + 1,
n_rows,
len(missing_indices),
time.time() - start_t))
candidate_neighbor_indices = D_sorted[i]
for j in missing_indices:
observed = observed_mask_column_major[:, j]
sorted_observed = observed[candidate_neighbor_indices]
observed_neighbor_indices = candidate_neighbor_indices[sorted_observed]
k_nearest_indices = observed_neighbor_indices[:k]
weights = row_weights[k_nearest_indices]
weight_sum = weights.sum()
if weight_sum > 0:
column = X_column_major[:, j]
values = column[k_nearest_indices]
X_row_major[i, j] = dot(values, weights) / weight_sum
return X_row_major |
0, module; 1, function_definition; 2, function_name:_sort_converters; 3, parameters; 4, block; 5, identifier:cls; 6, default_parameter; 7, expression_statement; 8, comment:# app_ready is True when called from DMP's AppConfig.ready(); 9, comment:# we can't sort before then because models aren't ready; 10, expression_statement; 11, if_statement; 12, identifier:app_ready; 13, False; 14, string; 15, assignment; 16, attribute; 17, block; 18, string_content:Sorts the converter functions; 19, attribute; 20, boolean_operator; 21, identifier:cls; 22, identifier:_sorting_enabled; 23, for_statement; 24, expression_statement; 25, identifier:cls; 26, identifier:_sorting_enabled; 27, attribute; 28, identifier:app_ready; 29, identifier:converter; 30, attribute; 31, block; 32, call; 33, identifier:cls; 34, identifier:_sorting_enabled; 35, identifier:cls; 36, identifier:converters; 37, expression_statement; 38, attribute; 39, argument_list; 40, call; 41, attribute; 42, identifier:sort; 43, keyword_argument; 44, attribute; 45, argument_list; 46, identifier:cls; 47, identifier:converters; 48, identifier:key; 49, call; 50, identifier:converter; 51, identifier:prepare_sort_key; 52, identifier:attrgetter; 53, argument_list; 54, string; 55, string_content:sort_key | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 6, 13; 7, 14; 10, 15; 11, 16; 11, 17; 14, 18; 15, 19; 15, 20; 16, 21; 16, 22; 17, 23; 17, 24; 19, 25; 19, 26; 20, 27; 20, 28; 23, 29; 23, 30; 23, 31; 24, 32; 27, 33; 27, 34; 30, 35; 30, 36; 31, 37; 32, 38; 32, 39; 37, 40; 38, 41; 38, 42; 39, 43; 40, 44; 40, 45; 41, 46; 41, 47; 43, 48; 43, 49; 44, 50; 44, 51; 49, 52; 49, 53; 53, 54; 54, 55 | def _sort_converters(cls, app_ready=False):
'''Sorts the converter functions'''
# app_ready is True when called from DMP's AppConfig.ready()
# we can't sort before then because models aren't ready
cls._sorting_enabled = cls._sorting_enabled or app_ready
if cls._sorting_enabled:
for converter in cls.converters:
converter.prepare_sort_key()
cls.converters.sort(key=attrgetter('sort_key')) |
0, module; 1, function_definition; 2, function_name:prepare_sort_key; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, if_statement; 8, comment:# we reverse sort by ( len(mro), source code order ) so subclasses match first; 9, comment:# on same types, last declared method sorts first; 10, expression_statement; 11, string; 12, call; 13, block; 14, assignment; 15, string_content:Triggered by view_function._sort_converters when our sort key should be created.
This can't be called in the constructor because Django models might not be ready yet.; 16, identifier:isinstance; 17, argument_list; 18, try_statement; 19, try_statement; 20, attribute; 21, tuple; 22, attribute; 23, identifier:str; 24, block; 25, except_clause; 26, block; 27, except_clause; 28, identifier:self; 29, identifier:sort_key; 30, binary_operator:-1 * len(inspect.getmro(self.convert_type)); 31, binary_operator:-1 * self.source_order; 32, identifier:self; 33, identifier:convert_type; 34, expression_statement; 35, identifier:ValueError; 36, block; 37, expression_statement; 38, as_pattern; 39, block; 40, unary_operator; 41, call; 42, unary_operator; 43, attribute; 44, assignment; 45, raise_statement; 46, assignment; 47, identifier:LookupError; 48, as_pattern_target; 49, raise_statement; 50, integer:1; 51, identifier:len; 52, argument_list; 53, integer:1; 54, identifier:self; 55, identifier:source_order; 56, pattern_list; 57, call; 58, call; 59, attribute; 60, call; 61, identifier:e; 62, call; 63, call; 64, identifier:app_name; 65, identifier:model_name; 66, attribute; 67, argument_list; 68, identifier:ImproperlyConfigured; 69, argument_list; 70, identifier:self; 71, identifier:convert_type; 72, attribute; 73, argument_list; 74, identifier:ImproperlyConfigured; 75, argument_list; 76, attribute; 77, argument_list; 78, attribute; 79, identifier:split; 80, string; 81, call; 82, identifier:apps; 83, identifier:get_model; 84, identifier:app_name; 85, identifier:model_name; 86, call; 87, identifier:inspect; 88, identifier:getmro; 89, attribute; 90, identifier:self; 91, identifier:convert_type; 92, string_content:.; 93, attribute; 94, argument_list; 95, attribute; 96, argument_list; 97, identifier:self; 98, identifier:convert_type; 99, string:'"{}" is not a valid converter type. String-based converter types must be specified in "app.Model" format.'; 100, identifier:format; 101, attribute; 102, string:'"{}" is not a valid model name. {}'; 103, identifier:format; 104, attribute; 105, identifier:e; 106, identifier:self; 107, identifier:convert_type; 108, identifier:self; 109, identifier:convert_type | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 7, 12; 7, 13; 10, 14; 11, 15; 12, 16; 12, 17; 13, 18; 13, 19; 14, 20; 14, 21; 17, 22; 17, 23; 18, 24; 18, 25; 19, 26; 19, 27; 20, 28; 20, 29; 21, 30; 21, 31; 22, 32; 22, 33; 24, 34; 25, 35; 25, 36; 26, 37; 27, 38; 27, 39; 30, 40; 30, 41; 31, 42; 31, 43; 34, 44; 36, 45; 37, 46; 38, 47; 38, 48; 39, 49; 40, 50; 41, 51; 41, 52; 42, 53; 43, 54; 43, 55; 44, 56; 44, 57; 45, 58; 46, 59; 46, 60; 48, 61; 49, 62; 52, 63; 56, 64; 56, 65; 57, 66; 57, 67; 58, 68; 58, 69; 59, 70; 59, 71; 60, 72; 60, 73; 62, 74; 62, 75; 63, 76; 63, 77; 66, 78; 66, 79; 67, 80; 69, 81; 72, 82; 72, 83; 73, 84; 73, 85; 75, 86; 76, 87; 76, 88; 77, 89; 78, 90; 78, 91; 80, 92; 81, 93; 81, 94; 86, 95; 86, 96; 89, 97; 89, 98; 93, 99; 93, 100; 94, 101; 95, 102; 95, 103; 96, 104; 96, 105; 101, 106; 101, 107; 104, 108; 104, 109 | def prepare_sort_key(self):
'''
Triggered by view_function._sort_converters when our sort key should be created.
This can't be called in the constructor because Django models might not be ready yet.
'''
if isinstance(self.convert_type, str):
try:
app_name, model_name = self.convert_type.split('.')
except ValueError:
raise ImproperlyConfigured('"{}" is not a valid converter type. String-based converter types must be specified in "app.Model" format.'.format(self.convert_type))
try:
self.convert_type = apps.get_model(app_name, model_name)
except LookupError as e:
raise ImproperlyConfigured('"{}" is not a valid model name. {}'.format(self.convert_type, e))
# we reverse sort by ( len(mro), source code order ) so subclasses match first
# on same types, last declared method sorts first
self.sort_key = ( -1 * len(inspect.getmro(self.convert_type)), -1 * self.source_order ) |
0, module; 1, function_definition; 2, function_name:sort_key_for_numeric_suffixes; 3, parameters; 4, block; 5, identifier:path; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, comment:# Remove suffix from path and convert to int; 11, if_statement; 12, return_statement; 13, identifier:sep; 14, string; 15, identifier:suffix_index; 16, unary_operator; 17, comment:"""
Sort files taking into account potentially absent suffixes like
somefile.dcd
somefile.1000.dcd
somefile.2000.dcd
To be used with sorted(..., key=callable).
"""; 18, assignment; 19, call; 20, block; 21, expression_list; 22, string_content:.; 23, integer:2; 24, identifier:chunks; 25, call; 26, attribute; 27, argument_list; 28, return_statement; 29, identifier:path; 30, integer:0; 31, attribute; 32, argument_list; 33, subscript; 34, identifier:isdigit; 35, expression_list; 36, identifier:path; 37, identifier:split; 38, identifier:sep; 39, identifier:chunks; 40, identifier:suffix_index; 41, call; 42, call; 43, attribute; 44, argument_list; 45, identifier:int; 46, argument_list; 47, identifier:sep; 48, identifier:join; 49, binary_operator:chunks[:suffix_index] + chunks[suffix_index+1:]; 50, subscript; 51, subscript; 52, subscript; 53, identifier:chunks; 54, identifier:suffix_index; 55, identifier:chunks; 56, slice; 57, identifier:chunks; 58, slice; 59, identifier:suffix_index; 60, binary_operator:suffix_index+1; 61, identifier:suffix_index; 62, integer:1 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 6, 13; 6, 14; 7, 15; 7, 16; 8, 17; 9, 18; 11, 19; 11, 20; 12, 21; 14, 22; 16, 23; 18, 24; 18, 25; 19, 26; 19, 27; 20, 28; 21, 29; 21, 30; 25, 31; 25, 32; 26, 33; 26, 34; 28, 35; 31, 36; 31, 37; 32, 38; 33, 39; 33, 40; 35, 41; 35, 42; 41, 43; 41, 44; 42, 45; 42, 46; 43, 47; 43, 48; 44, 49; 46, 50; 49, 51; 49, 52; 50, 53; 50, 54; 51, 55; 51, 56; 52, 57; 52, 58; 56, 59; 58, 60; 60, 61; 60, 62 | def sort_key_for_numeric_suffixes(path, sep='.', suffix_index=-2):
"""
Sort files taking into account potentially absent suffixes like
somefile.dcd
somefile.1000.dcd
somefile.2000.dcd
To be used with sorted(..., key=callable).
"""
chunks = path.split(sep)
# Remove suffix from path and convert to int
if chunks[suffix_index].isdigit():
return sep.join(chunks[:suffix_index] + chunks[suffix_index+1:]), int(chunks[suffix_index])
return path, 0 |
0, module; 1, function_definition; 2, function_name:_get_query; 3, parameters; 4, block; 5, identifier:self; 6, identifier:cursor; 7, expression_statement; 8, expression_statement; 9, if_statement; 10, if_statement; 11, return_statement; 12, string; 13, assignment; 14, attribute; 15, block; 16, attribute; 17, block; 18, identifier:query; 19, string_content:Query tempalte for source Solr, sorts by id by default.; 20, identifier:query; 21, dictionary; 22, identifier:self; 23, identifier:_date_field; 24, expression_statement; 25, identifier:self; 26, identifier:_per_shard; 27, expression_statement; 28, pair; 29, pair; 30, pair; 31, pair; 32, assignment; 33, assignment; 34, string; 35, string; 36, string; 37, string; 38, string; 39, attribute; 40, string; 41, identifier:cursor; 42, subscript; 43, call; 44, subscript; 45, string; 46, string_content:q; 47, string_content:*:*; 48, string_content:sort; 49, string_content:id desc; 50, string_content:rows; 51, identifier:self; 52, identifier:_rows; 53, string_content:cursorMark; 54, identifier:query; 55, string; 56, attribute; 57, argument_list; 58, identifier:query; 59, string; 60, string_content:false; 61, string_content:sort; 62, string:"{} asc, id desc"; 63, identifier:format; 64, attribute; 65, string_content:distrib; 66, identifier:self; 67, identifier:_date_field | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 8, 13; 9, 14; 9, 15; 10, 16; 10, 17; 11, 18; 12, 19; 13, 20; 13, 21; 14, 22; 14, 23; 15, 24; 16, 25; 16, 26; 17, 27; 21, 28; 21, 29; 21, 30; 21, 31; 24, 32; 27, 33; 28, 34; 28, 35; 29, 36; 29, 37; 30, 38; 30, 39; 31, 40; 31, 41; 32, 42; 32, 43; 33, 44; 33, 45; 34, 46; 35, 47; 36, 48; 37, 49; 38, 50; 39, 51; 39, 52; 40, 53; 42, 54; 42, 55; 43, 56; 43, 57; 44, 58; 44, 59; 45, 60; 55, 61; 56, 62; 56, 63; 57, 64; 59, 65; 64, 66; 64, 67 | def _get_query(self, cursor):
'''
Query template for source Solr, sorts by id by default.
'''
query = {'q':'*:*',
'sort':'id desc',
'rows':self._rows,
'cursorMark':cursor}
if self._date_field:
query['sort'] = "{} asc, id desc".format(self._date_field)
if self._per_shard:
query['distrib'] = 'false'
return query |
0, module; 1, function_definition; 2, function_name:url_params; 3, parameters; 4, block; 5, identifier:request; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, if_statement; 10, expression_statement; 11, for_statement; 12, if_statement; 13, return_statement; 14, identifier:except_params; 15, None; 16, identifier:as_is; 17, False; 18, comment:"""
create string with GET-params of request
usage example:
c['sort_url'] = url_params(request, except_params=('sort',))
...
<a href="{{ sort_url }}&sort=lab_number">Laboratory number</a>
"""; 19, not_operator; 20, block; 21, assignment; 22, pattern_list; 23, call; 24, block; 25, identifier:as_is; 26, block; 27, else_clause; 28, call; 29, attribute; 30, return_statement; 31, identifier:params; 32, list; 33, identifier:key; 34, identifier:value; 35, attribute; 36, argument_list; 37, if_statement; 38, expression_statement; 39, block; 40, identifier:mark_safe; 41, argument_list; 42, identifier:request; 43, identifier:GET; 44, string; 45, attribute; 46, identifier:items; 47, boolean_operator; 48, block; 49, assignment; 50, expression_statement; 51, expression_statement; 52, identifier:str_params; 53, identifier:request; 54, identifier:GET; 55, identifier:except_params; 56, comparison_operator:key not in except_params; 57, for_statement; 58, identifier:str_params; 59, binary_operator:'?' + '&'.join(params); 60, assignment; 61, assignment; 62, identifier:key; 63, identifier:except_params; 64, identifier:v; 65, call; 66, block; 67, string; 68, call; 69, identifier:str_params; 70, binary_operator:'?' + '&'.join(params); 71, identifier:str_params; 72, call; 73, attribute; 74, argument_list; 75, expression_statement; 76, string_content:?; 77, attribute; 78, argument_list; 79, string; 80, call; 81, identifier:urlquote; 82, argument_list; 83, attribute; 84, identifier:getlist; 85, identifier:key; 86, call; 87, string; 88, identifier:join; 89, identifier:params; 90, string_content:?; 91, attribute; 92, argument_list; 93, identifier:str_params; 94, identifier:request; 95, identifier:GET; 96, attribute; 97, argument_list; 98, string_content:&; 99, string; 100, identifier:join; 101, identifier:params; 102, identifier:params; 103, identifier:append; 104, binary_operator:'%s=%s' % (key, urlquote(v)); 105, string_content:&; 106, string; 107, tuple; 108, string_content:%s=%s; 109, identifier:key; 110, call; 111, identifier:urlquote; 112, argument_list; 113, identifier:v | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 6, 14; 6, 15; 7, 16; 7, 17; 8, 18; 9, 19; 9, 20; 10, 21; 11, 22; 11, 23; 11, 24; 12, 25; 12, 26; 12, 27; 13, 28; 19, 29; 20, 30; 21, 31; 21, 32; 22, 33; 22, 34; 23, 35; 23, 36; 24, 37; 26, 38; 27, 39; 28, 40; 28, 41; 29, 42; 29, 43; 30, 44; 35, 45; 35, 46; 37, 47; 37, 48; 38, 49; 39, 50; 39, 51; 41, 52; 45, 53; 45, 54; 47, 55; 47, 56; 48, 57; 49, 58; 49, 59; 50, 60; 51, 61; 56, 62; 56, 63; 57, 64; 57, 65; 57, 66; 59, 67; 59, 68; 60, 69; 60, 70; 61, 71; 61, 72; 65, 73; 65, 74; 66, 75; 67, 76; 68, 77; 68, 78; 70, 79; 70, 80; 72, 81; 72, 82; 73, 83; 73, 84; 74, 85; 75, 86; 77, 87; 77, 88; 78, 89; 79, 90; 80, 91; 80, 92; 82, 93; 83, 94; 83, 95; 86, 96; 86, 97; 87, 98; 91, 99; 91, 100; 92, 101; 96, 102; 96, 103; 97, 104; 99, 105; 104, 106; 104, 107; 106, 108; 107, 109; 107, 110; 110, 111; 110, 112; 112, 113 | def url_params(request, except_params=None, as_is=False):
"""
create string with GET-params of request
usage example:
c['sort_url'] = url_params(request, except_params=('sort',))
...
<a href="{{ sort_url }}&sort=lab_number">Laboratory number</a>
"""
if not request.GET:
return ''
params = []
for key, value in request.GET.items():
if except_params and key not in except_params:
for v in request.GET.getlist(key):
params.append('%s=%s' % (key, urlquote(v)))
if as_is:
str_params = '?' + '&'.join(params)
else:
str_params = '?' + '&'.join(params)
str_params = urlquote(str_params)
return mark_safe(str_params) |
0, module; 1, function_definition; 2, function_name:compose; 3, parameters; 4, block; 5, list_splat_pattern; 6, expression_statement; 7, comment:# slightly optimized for most common cases and hence verbose; 8, if_statement; 9, identifier:funcs; 10, comment:"""Compose `funcs` to a single function.
>>> compose(operator.abs, operator.add)(-2,-3)
5
>>> compose()('nada')
'nada'
>>> compose(sorted, set, partial(filter, None))(range(3)[::-1]*2)
[1, 2]
"""; 11, comparison_operator:len(funcs) == 2; 12, block; 13, elif_clause; 14, elif_clause; 15, elif_clause; 16, else_clause; 17, call; 18, integer:2; 19, expression_statement; 20, return_statement; 21, comparison_operator:len(funcs) == 3; 22, block; 23, comparison_operator:len(funcs) == 0; 24, block; 25, comparison_operator:len(funcs) == 1; 26, block; 27, block; 28, identifier:len; 29, argument_list; 30, assignment; 31, lambda; 32, call; 33, integer:3; 34, expression_statement; 35, return_statement; 36, call; 37, integer:0; 38, return_statement; 39, comment:# XXX single kwarg; 40, call; 41, integer:1; 42, return_statement; 43, function_definition; 44, return_statement; 45, identifier:funcs; 46, pattern_list; 47, identifier:funcs; 48, lambda_parameters; 49, call; 50, identifier:len; 51, argument_list; 52, assignment; 53, lambda; 54, identifier:len; 55, argument_list; 56, lambda; 57, identifier:len; 58, argument_list; 59, subscript; 60, function_name:composed; 61, parameters; 62, block; 63, identifier:composed; 64, identifier:f0; 65, identifier:f1; 66, list_splat_pattern; 67, dictionary_splat_pattern; 68, identifier:f0; 69, argument_list; 70, identifier:funcs; 71, pattern_list; 72, identifier:funcs; 73, lambda_parameters; 74, call; 75, identifier:funcs; 76, lambda_parameters; 77, identifier:x; 78, identifier:funcs; 79, identifier:funcs; 80, integer:0; 81, list_splat_pattern; 82, dictionary_splat_pattern; 83, expression_statement; 84, for_statement; 85, return_statement; 86, identifier:a; 87, identifier:kw; 88, call; 89, identifier:f0; 90, identifier:f1; 91, identifier:f2; 92, list_splat_pattern; 93, dictionary_splat_pattern; 94, identifier:f0; 95, argument_list; 96, identifier:x; 97, identifier:args; 98, identifier:kwargs; 99, assignment; 100, identifier:f; 101, subscript; 102, block; 103, identifier:y; 104, identifier:f1; 105, argument_list; 106, identifier:a; 107, identifier:kw; 108, call; 109, identifier:y; 110, call; 111, identifier:funcs; 112, slice; 113, expression_statement; 114, list_splat; 115, dictionary_splat; 116, identifier:f1; 117, argument_list; 118, subscript; 119, argument_list; 120, integer:0; 121, unary_operator; 122, assignment; 123, identifier:a; 124, identifier:kw; 125, call; 126, identifier:funcs; 127, unary_operator; 128, list_splat; 129, dictionary_splat; 130, integer:1; 131, identifier:y; 132, call; 133, identifier:f2; 134, argument_list; 135, integer:1; 136, identifier:args; 137, identifier:kwargs; 138, identifier:f; 139, argument_list; 140, list_splat; 141, dictionary_splat; 142, identifier:y; 143, identifier:a; 144, identifier:kw | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 5, 9; 6, 10; 8, 11; 8, 12; 8, 13; 8, 14; 8, 15; 8, 16; 11, 17; 11, 18; 12, 19; 12, 20; 13, 21; 13, 22; 14, 23; 14, 24; 15, 25; 15, 26; 16, 27; 17, 28; 17, 29; 19, 30; 20, 31; 21, 32; 21, 33; 22, 34; 22, 35; 23, 36; 23, 37; 24, 38; 24, 39; 25, 40; 25, 41; 26, 42; 27, 43; 27, 44; 29, 45; 30, 46; 30, 47; 31, 48; 31, 49; 32, 50; 32, 51; 34, 52; 35, 53; 36, 54; 36, 55; 38, 56; 40, 57; 40, 58; 42, 59; 43, 60; 43, 61; 43, 62; 44, 63; 46, 64; 46, 65; 48, 66; 48, 67; 49, 68; 49, 69; 51, 70; 52, 71; 52, 72; 53, 73; 53, 74; 55, 75; 56, 76; 56, 77; 58, 78; 59, 79; 59, 80; 61, 81; 61, 82; 62, 83; 62, 84; 62, 85; 66, 86; 67, 87; 69, 88; 71, 89; 71, 90; 71, 91; 73, 92; 73, 93; 74, 94; 74, 95; 76, 96; 81, 97; 82, 98; 83, 99; 84, 100; 84, 101; 84, 102; 85, 103; 88, 104; 88, 105; 92, 106; 93, 107; 95, 108; 99, 109; 99, 110; 101, 111; 101, 112; 102, 113; 105, 114; 105, 115; 108, 116; 108, 117; 110, 118; 
110, 119; 112, 120; 112, 121; 113, 122; 114, 123; 115, 124; 117, 125; 118, 126; 118, 127; 119, 128; 119, 129; 121, 130; 122, 131; 122, 132; 125, 133; 125, 134; 127, 135; 128, 136; 129, 137; 132, 138; 132, 139; 134, 140; 134, 141; 139, 142; 140, 143; 141, 144 | def compose(*funcs):
"""Compose `funcs` to a single function.
>>> compose(operator.abs, operator.add)(-2,-3)
5
>>> compose()('nada')
'nada'
>>> compose(sorted, set, partial(filter, None))(range(3)[::-1]*2)
[1, 2]
"""
# slightly optimized for most common cases and hence verbose
if len(funcs) == 2: f0,f1=funcs; return lambda *a,**kw: f0(f1(*a,**kw))
elif len(funcs) == 3: f0,f1,f2=funcs; return lambda *a,**kw: f0(f1(f2(*a,**kw)))
elif len(funcs) == 0: return lambda x:x # XXX single kwarg
elif len(funcs) == 1: return funcs[0]
else:
def composed(*args,**kwargs):
y = funcs[-1](*args,**kwargs)
for f in funcs[:0:-1]: y = f(y)
return y
return composed |
0, module; 1, function_definition; 2, function_name:_sort_lines; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, comment:"""Haproxy writes its logs after having gathered all information
related to each specific connection. A simple request can be
really quick but others can be really slow, thus even if one connection
is logged later, it could have been accepted before others that are
already processed and logged.
This method sorts all valid log lines by their acceptance date,
providing the real order in which connections were made to the server.
"""; 9, assignment; 10, attribute; 11, call; 12, identifier:self; 13, identifier:_valid_lines; 14, identifier:sorted; 15, argument_list; 16, attribute; 17, keyword_argument; 18, identifier:self; 19, identifier:_valid_lines; 20, identifier:key; 21, lambda; 22, lambda_parameters; 23, attribute; 24, identifier:line; 25, identifier:line; 26, identifier:accept_date | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 6, 8; 7, 9; 9, 10; 9, 11; 10, 12; 10, 13; 11, 14; 11, 15; 15, 16; 15, 17; 16, 18; 16, 19; 17, 20; 17, 21; 21, 22; 21, 23; 22, 24; 23, 25; 23, 26 | def _sort_lines(self):
"""Haproxy writes its logs after having gathered all information
related to each specific connection. A simple request can be
really quick but others can be really slow, thus even if one connection
is logged later, it could have been accepted before others that are
already processed and logged.
This method sorts all valid log lines by their acceptance date,
providing the real order in which connections were made to the server.
"""
self._valid_lines = sorted(
self._valid_lines,
key=lambda line: line.accept_date,
) |
0, module; 1, function_definition; 2, function_name:_sort_and_trim; 3, parameters; 4, block; 5, identifier:data; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, return_statement; 12, identifier:reverse; 13, False; 14, comment:"""Sorts a dictionary with at least two fields on each of them sorting
by the second element.
.. warning::
Right now it is hardcoded to 10 elements; improve the command line
interface to allow sending parameters to each command or globally.
"""; 15, assignment; 16, assignment; 17, assignment; 18, subscript; 19, identifier:threshold; 20, integer:10; 21, identifier:data_list; 22, call; 23, identifier:data_list; 24, call; 25, identifier:data_list; 26, slice; 27, attribute; 28, argument_list; 29, identifier:sorted; 30, argument_list; 31, identifier:threshold; 32, identifier:data; 33, identifier:items; 34, identifier:data_list; 35, keyword_argument; 36, keyword_argument; 37, identifier:key; 38, lambda; 39, identifier:reverse; 40, identifier:reverse; 41, lambda_parameters; 42, subscript; 43, identifier:data_info; 44, identifier:data_info; 45, integer:1 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 6, 13; 7, 14; 8, 15; 9, 16; 10, 17; 11, 18; 15, 19; 15, 20; 16, 21; 16, 22; 17, 23; 17, 24; 18, 25; 18, 26; 22, 27; 22, 28; 24, 29; 24, 30; 26, 31; 27, 32; 27, 33; 30, 34; 30, 35; 30, 36; 35, 37; 35, 38; 36, 39; 36, 40; 38, 41; 38, 42; 41, 43; 42, 44; 42, 45 | def _sort_and_trim(data, reverse=False):
"""Sorts a dictionary with at least two fields on each of them sorting
by the second element.
.. warning::
Right now it is hardcoded to 10 elements; improve the command line
interface to allow sending parameters to each command or globally.
"""
threshold = 10
data_list = data.items()
data_list = sorted(
data_list,
key=lambda data_info: data_info[1],
reverse=reverse,
)
return data_list[:threshold] |
0, module; 1, function_definition; 2, function_name:get_queryset; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, comment:# Perform global search; 9, expression_statement; 10, comment:# Perform column search; 11, expression_statement; 12, comment:# Return the ordered queryset; 13, return_statement; 14, string; 15, assignment; 16, assignment; 17, assignment; 18, call; 19, string_content:Apply Datatables sort and search criterion to QuerySet; 20, identifier:qs; 21, call; 22, identifier:qs; 23, call; 24, identifier:qs; 25, call; 26, attribute; 27, argument_list; 28, attribute; 29, argument_list; 30, attribute; 31, argument_list; 32, attribute; 33, argument_list; 34, identifier:qs; 35, identifier:order_by; 36, list_splat; 37, call; 38, identifier:get_queryset; 39, identifier:self; 40, identifier:global_search; 41, identifier:qs; 42, identifier:self; 43, identifier:column_search; 44, identifier:qs; 45, call; 46, identifier:super; 47, argument_list; 48, attribute; 49, argument_list; 50, identifier:DatatablesView; 51, identifier:self; 52, identifier:self; 53, identifier:get_orders | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 6, 14; 7, 15; 9, 16; 11, 17; 13, 18; 14, 19; 15, 20; 15, 21; 16, 22; 16, 23; 17, 24; 17, 25; 18, 26; 18, 27; 21, 28; 21, 29; 23, 30; 23, 31; 25, 32; 25, 33; 26, 34; 26, 35; 27, 36; 28, 37; 28, 38; 30, 39; 30, 40; 31, 41; 32, 42; 32, 43; 33, 44; 36, 45; 37, 46; 37, 47; 45, 48; 45, 49; 47, 50; 47, 51; 48, 52; 48, 53 | def get_queryset(self):
'''Apply Datatables sort and search criteria to QuerySet'''
qs = super(DatatablesView, self).get_queryset()
# Perform global search
qs = self.global_search(qs)
# Perform column search
qs = self.column_search(qs)
# Return the ordered queryset
return qs.order_by(*self.get_orders()) |
0, module; 1, function_definition; 2, function_name:rangify; 3, parameters; 4, block; 5, identifier:number_list; 6, expression_statement; 7, if_statement; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, expression_statement; 12, return_statement; 13, comment:"""Assumes the list is sorted."""; 14, not_operator; 15, block; 16, assignment; 17, assignment; 18, identifier:num; 19, subscript; 20, block; 21, call; 22, identifier:ranges; 23, identifier:number_list; 24, return_statement; 25, identifier:ranges; 26, list; 27, identifier:range_start; 28, assignment; 29, identifier:number_list; 30, slice; 31, if_statement; 32, expression_statement; 33, attribute; 34, argument_list; 35, identifier:number_list; 36, identifier:prev_num; 37, subscript; 38, integer:1; 39, comparison_operator:num != (prev_num + 1); 40, block; 41, assignment; 42, identifier:ranges; 43, identifier:append; 44, tuple; 45, identifier:number_list; 46, integer:0; 47, identifier:num; 48, parenthesized_expression; 49, expression_statement; 50, expression_statement; 51, identifier:prev_num; 52, identifier:num; 53, identifier:range_start; 54, identifier:prev_num; 55, binary_operator:prev_num + 1; 56, call; 57, assignment; 58, identifier:prev_num; 59, integer:1; 60, attribute; 61, argument_list; 62, identifier:range_start; 63, identifier:num; 64, identifier:ranges; 65, identifier:append; 66, tuple; 67, identifier:range_start; 68, identifier:prev_num | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 6, 13; 7, 14; 7, 15; 8, 16; 9, 17; 10, 18; 10, 19; 10, 20; 11, 21; 12, 22; 14, 23; 15, 24; 16, 25; 16, 26; 17, 27; 17, 28; 19, 29; 19, 30; 20, 31; 20, 32; 21, 33; 21, 34; 24, 35; 28, 36; 28, 37; 30, 38; 31, 39; 31, 40; 32, 41; 33, 42; 33, 43; 34, 44; 37, 45; 37, 46; 39, 47; 39, 48; 40, 49; 40, 50; 41, 51; 41, 52; 44, 53; 44, 54; 48, 55; 49, 56; 50, 57; 55, 58; 55, 59; 56, 60; 56, 61; 57, 62; 57, 63; 60, 64; 60, 65; 61, 66; 66, 67; 66, 68 | def rangify(number_list):
"""Assumes the list is sorted."""
if not number_list:
return number_list
ranges = []
range_start = prev_num = number_list[0]
for num in number_list[1:]:
if num != (prev_num + 1):
ranges.append((range_start, prev_num))
range_start = num
prev_num = num
ranges.append((range_start, prev_num))
return ranges |
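A minimal usage sketch (the input values are hypothetical) showing how consecutive runs collapse into (start, end) tuples:

# Hypothetical input; the docstring assumes the list is already sorted.
print(rangify([1, 2, 3, 7, 8, 10]))   # -> [(1, 3), (7, 8), (10, 10)]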
0, module; 1, function_definition; 2, function_name:get_solver; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, dictionary_splat_pattern; 9, expression_statement; 10, expression_statement; 11, comment:# backward compatibility: name as the first feature; 12, if_statement; 13, comment:# in absence of other filters, config/env solver filters/name are used; 14, if_statement; 15, comment:# get the first solver that satisfies all filters; 16, try_statement; 17, identifier:name; 18, None; 19, identifier:refresh; 20, False; 21, identifier:filters; 22, comment:"""Load the configuration for a single solver.
Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
is a URL configured for the client, and returns a :class:`.Solver` instance
that can be used to submit sampling problems to the D-Wave API and retrieve results.
Args:
name (str):
ID of the requested solver. ``None`` returns the default solver.
If default solver is not configured, ``None`` returns the first available
solver in ``Client``'s class (QPU/software/base).
**filters (keyword arguments, optional):
Dictionary of filters over features this solver has to have. For a list of
feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.
order_by (callable/str, default='id'):
Solver sorting key function (or :class:`Solver` attribute name).
By default, solvers are sorted by ID/name.
refresh (bool):
Return solver from cache (if cached with ``get_solvers()``),
unless set to ``True``.
Returns:
:class:`.Solver`
Examples:
This example creates two solvers for a client instantiated from
a local system's auto-detected default configuration file, which configures
a connection to a D-Wave resource that provides two solvers. The first
uses the default solver, the second explicitly selects another solver.
>>> from dwave.cloud import Client
>>> client = Client.from_config()
>>> client.get_solvers() # doctest: +SKIP
[Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
>>> solver1 = client.get_solver() # doctest: +SKIP
>>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2') # doctest: +SKIP
>>> solver1.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER1'
>>> solver2.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER2'
>>> # code that uses client
>>> client.close() # doctest: +SKIP
"""; 23, call; 24, comparison_operator:name is not None; 25, block; 26, boolean_operator; 27, block; 28, block; 29, except_clause; 30, attribute; 31, argument_list; 32, identifier:name; 33, None; 34, expression_statement; 35, not_operator; 36, attribute; 37, expression_statement; 38, expression_statement; 39, return_statement; 40, identifier:IndexError; 41, block; 42, identifier:_LOGGER; 43, identifier:debug; 44, string:"Requested a solver that best matches feature filters=%r"; 45, identifier:filters; 46, call; 47, identifier:filters; 48, identifier:self; 49, identifier:default_solver; 50, assignment; 51, call; 52, subscript; 53, raise_statement; 54, attribute; 55, argument_list; 56, identifier:filters; 57, attribute; 58, attribute; 59, argument_list; 60, call; 61, integer:0; 62, call; 63, identifier:filters; 64, identifier:setdefault; 65, string; 66, identifier:name; 67, identifier:self; 68, identifier:default_solver; 69, identifier:_LOGGER; 70, identifier:debug; 71, string:"Fetching solvers according to filters=%r"; 72, identifier:filters; 73, attribute; 74, argument_list; 75, identifier:SolverNotFoundError; 76, argument_list; 77, string_content:name; 78, identifier:self; 79, identifier:get_solvers; 80, keyword_argument; 81, dictionary_splat; 82, string:"Solver with the requested features not available"; 83, identifier:refresh; 84, identifier:refresh; 85, identifier:filters | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 6, 17; 6, 18; 7, 19; 7, 20; 8, 21; 9, 22; 10, 23; 12, 24; 12, 25; 14, 26; 14, 27; 16, 28; 16, 29; 23, 30; 23, 31; 24, 32; 24, 33; 25, 34; 26, 35; 26, 36; 27, 37; 28, 38; 28, 39; 29, 40; 29, 41; 30, 42; 30, 43; 31, 44; 31, 45; 34, 46; 35, 47; 36, 48; 36, 49; 37, 50; 38, 51; 39, 52; 41, 53; 46, 54; 46, 55; 50, 56; 50, 57; 51, 58; 51, 59; 52, 60; 52, 61; 53, 62; 54, 63; 54, 64; 55, 65; 55, 66; 57, 67; 57, 68; 58, 69; 58, 70; 59, 71; 59, 72; 60, 73; 60, 74; 62, 75; 62, 76; 65, 77; 73, 78; 73, 79; 74, 80; 74, 81; 76, 82; 80, 83; 80, 84; 81, 85 | def get_solver(self, name=None, refresh=False, **filters):
"""Load the configuration for a single solver.
Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
is a URL configured for the client, and returns a :class:`.Solver` instance
that can be used to submit sampling problems to the D-Wave API and retrieve results.
Args:
name (str):
ID of the requested solver. ``None`` returns the default solver.
If default solver is not configured, ``None`` returns the first available
solver in ``Client``'s class (QPU/software/base).
**filters (keyword arguments, optional):
Dictionary of filters over features this solver has to have. For a list of
feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.
order_by (callable/str, default='id'):
Solver sorting key function (or :class:`Solver` attribute name).
By default, solvers are sorted by ID/name.
refresh (bool):
Return solver from cache (if cached with ``get_solvers()``),
unless set to ``True``.
Returns:
:class:`.Solver`
Examples:
This example creates two solvers for a client instantiated from
a local system's auto-detected default configuration file, which configures
a connection to a D-Wave resource that provides two solvers. The first
uses the default solver, the second explicitly selects another solver.
>>> from dwave.cloud import Client
>>> client = Client.from_config()
>>> client.get_solvers() # doctest: +SKIP
[Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
>>> solver1 = client.get_solver() # doctest: +SKIP
>>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2') # doctest: +SKIP
>>> solver1.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER1'
>>> solver2.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER2'
>>> # code that uses client
>>> client.close() # doctest: +SKIP
"""
_LOGGER.debug("Requested a solver that best matches feature filters=%r", filters)
# backward compatibility: name as the first feature
if name is not None:
filters.setdefault('name', name)
# in absence of other filters, config/env solver filters/name are used
if not filters and self.default_solver:
filters = self.default_solver
# get the first solver that satisfies all filters
try:
_LOGGER.debug("Fetching solvers according to filters=%r", filters)
return self.get_solvers(refresh=refresh, **filters)[0]
except IndexError:
raise SolverNotFoundError("Solver with the requested features not available") |
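A hedged sketch of feature-based selection, complementing the name-based examples in the docstring; the ``qpu`` feature flag is an assumption and may differ between client versions (see ``Client.get_solvers`` for the filters actually supported):

from dwave.cloud import Client

with Client.from_config() as client:
    # Assumed feature filter: take the first solver advertising the 'qpu' feature.
    solver = client.get_solver(qpu=True)
    print(solver.id)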
0, module; 1, function_definition; 2, function_name:find_germanet_xml_files; 3, parameters; 4, block; 5, identifier:xml_path; 6, expression_statement; 7, expression_statement; 8, comment:# sort out the lexical files; 9, expression_statement; 10, expression_statement; 11, if_statement; 12, comment:# sort out the GermaNet relations file; 13, expression_statement; 14, expression_statement; 15, if_statement; 16, comment:# sort out the wiktionary paraphrase files; 17, expression_statement; 18, expression_statement; 19, if_statement; 20, comment:# sort out the interlingual index file; 21, expression_statement; 22, expression_statement; 23, if_statement; 24, if_statement; 25, return_statement; 26, string; 27, assignment; 28, assignment; 29, assignment; 30, not_operator; 31, block; 32, assignment; 33, assignment; 34, not_operator; 35, block; 36, else_clause; 37, assignment; 38, assignment; 39, not_operator; 40, block; 41, assignment; 42, assignment; 43, not_operator; 44, block; 45, identifier:xml_files; 46, block; 47, expression_list; 48, string_content:Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files; 49, identifier:xml_files; 50, call; 51, identifier:lex_files; 52, list_comprehension; 53, identifier:xml_files; 54, call; 55, identifier:lex_files; 56, expression_statement; 57, identifier:gn_rels_file; 58, list_comprehension; 59, identifier:xml_files; 60, call; 61, identifier:gn_rels_file; 62, expression_statement; 63, expression_statement; 64, block; 65, identifier:wiktionary_files; 66, list_comprehension; 67, identifier:xml_files; 68, call; 69, identifier:wiktionary_files; 70, expression_statement; 71, identifier:ili_files; 72, list_comprehension; 73, identifier:xml_files; 74, call; 75, identifier:ili_files; 76, expression_statement; 77, expression_statement; 78, identifier:lex_files; 79, identifier:gn_rels_file; 80, identifier:wiktionary_files; 81, identifier:ili_files; 82, identifier:sorted; 83, argument_list; 84, identifier:xml_file; 85, for_in_clause; 86, if_clause; 87, identifier:sorted; 88, argument_list; 89, call; 90, identifier:xml_file; 91, for_in_clause; 92, if_clause; 93, identifier:sorted; 94, argument_list; 95, call; 96, assignment; 97, if_statement; 98, expression_statement; 99, identifier:xml_file; 100, for_in_clause; 101, if_clause; 102, identifier:sorted; 103, argument_list; 104, call; 105, identifier:xml_file; 106, for_in_clause; 107, if_clause; 108, identifier:sorted; 109, argument_list; 110, call; 111, call; 112, call; 113, identifier:xml_file; 114, identifier:xml_files; 115, call; 116, binary_operator:set(xml_files) - set(lex_files); 117, identifier:print; 118, argument_list; 119, identifier:xml_file; 120, identifier:xml_files; 121, comparison_operator:os.path.basename(xml_file).lower() == 'gn_relations.xml'; 122, binary_operator:set(xml_files) - set(gn_rels_file); 123, identifier:print; 124, argument_list; 125, identifier:gn_rels_file; 126, None; 127, comparison_operator:1 < len(gn_rels_file); 128, block; 129, assignment; 130, identifier:xml_file; 131, identifier:xml_files; 132, call; 133, binary_operator:set(xml_files) - set(wiktionary_files); 134, identifier:print; 135, argument_list; 136, identifier:xml_file; 137, identifier:xml_files; 138, call; 139, binary_operator:set(xml_files) - set(ili_files); 140, identifier:print; 141, argument_list; 142, identifier:print; 143, argument_list; 144, attribute; 145, argument_list; 146, attribute; 147, argument_list; 148, call; 149, call; 150, string; 151, call; 152, string; 153, call; 154, call; 155, string; 156, integer:1; 157, call; 158, expression_statement; 159, identifier:gn_rels_file; 160, subscript; 161, attribute; 162, argument_list; 163, call; 164, call; 165, string; 166, attribute; 167, argument_list; 168, call; 169, call; 170, string; 171, string; 172, identifier:xml_files; 173, identifier:glob; 174, identifier:glob; 175, call; 176, identifier:re; 177, identifier:match; 178, string; 179, call; 180, identifier:set; 181, argument_list; 182, identifier:set; 183, argument_list; 184, string_content:ERROR: cannot find lexical information files; 185, attribute; 186, argument_list; 187, string_content:gn_relations.xml; 188, identifier:set; 189, argument_list; 190, identifier:set; 191, argument_list; 192, string_content:ERROR: cannot find relations file gn_relations.xml; 193, identifier:len; 194, argument_list; 195, call; 196, identifier:gn_rels_file; 197, integer:0; 198, identifier:re; 199, identifier:match; 200, string; 201, call; 202, identifier:set; 203, argument_list; 204, identifier:set; 205, argument_list; 206, string_content:WARNING: cannot find wiktionary paraphrase files; 207, call; 208, 
identifier:startswith; 209, string; 210, identifier:set; 211, argument_list; 212, identifier:set; 213, argument_list; 214, string_content:WARNING: cannot find interlingual index file; 215, string_content:WARNING: unrecognised xml files:; 216, attribute; 217, argument_list; 218, string_content:(adj|nomen|verben)\.; 219, attribute; 220, argument_list; 221, identifier:xml_files; 222, identifier:lex_files; 223, call; 224, identifier:lower; 225, identifier:xml_files; 226, identifier:gn_rels_file; 227, identifier:gn_rels_file; 228, identifier:print; 229, argument_list; 230, string_content:wiktionaryparaphrases-; 231, attribute; 232, argument_list; 233, identifier:xml_files; 234, identifier:wiktionary_files; 235, attribute; 236, argument_list; 237, string_content:interlingualindex; 238, identifier:xml_files; 239, identifier:ili_files; 240, attribute; 241, identifier:join; 242, identifier:xml_path; 243, string; 244, call; 245, identifier:lower; 246, attribute; 247, argument_list; 248, concatenated_string; 249, call; 250, identifier:lower; 251, call; 252, identifier:lower; 253, identifier:os; 254, identifier:path; 255, string_content:*.xml; 256, attribute; 257, argument_list; 258, attribute; 259, identifier:basename; 260, identifier:xml_file; 261, string; 262, string; 263, attribute; 264, argument_list; 265, attribute; 266, argument_list; 267, attribute; 268, identifier:basename; 269, identifier:xml_file; 270, identifier:os; 271, identifier:path; 272, string_content:WARNING: more than one relations file gn_relations.xml,; 273, string_content:taking first match; 274, attribute; 275, identifier:basename; 276, identifier:xml_file; 277, attribute; 278, identifier:basename; 279, identifier:xml_file; 280, identifier:os; 281, identifier:path; 282, identifier:os; 283, identifier:path; 284, identifier:os; 285, identifier:path | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 6, 26; 7, 27; 9, 28; 10, 29; 11, 30; 11, 31; 13, 32; 14, 33; 15, 34; 15, 35; 15, 36; 17, 37; 18, 38; 19, 39; 19, 40; 21, 41; 22, 42; 23, 43; 23, 44; 24, 45; 24, 46; 25, 47; 26, 48; 27, 49; 27, 50; 28, 51; 28, 52; 29, 53; 29, 54; 30, 55; 31, 56; 32, 57; 32, 58; 33, 59; 33, 60; 34, 61; 35, 62; 35, 63; 36, 64; 37, 65; 37, 66; 38, 67; 38, 68; 39, 69; 40, 70; 41, 71; 41, 72; 42, 73; 42, 74; 43, 75; 44, 76; 46, 77; 47, 78; 47, 79; 47, 80; 47, 81; 50, 82; 50, 83; 52, 84; 52, 85; 52, 86; 54, 87; 54, 88; 56, 89; 58, 90; 58, 91; 58, 92; 60, 93; 60, 94; 62, 95; 63, 96; 64, 97; 64, 98; 66, 99; 66, 100; 66, 101; 68, 102; 68, 103; 70, 104; 72, 105; 72, 106; 72, 107; 74, 108; 74, 109; 76, 110; 77, 111; 83, 112; 85, 113; 85, 114; 86, 115; 88, 116; 89, 117; 89, 118; 91, 119; 91, 120; 92, 121; 94, 122; 95, 123; 95, 124; 96, 125; 96, 126; 97, 127; 97, 128; 98, 129; 100, 130; 100, 131; 101, 132; 103, 133; 104, 134; 104, 135; 106, 136; 106, 137; 107, 138; 109, 139; 110, 140; 110, 141; 111, 142; 111, 143; 112, 144; 112, 145; 115, 146; 115, 147; 116, 148; 116, 149; 118, 150; 121, 151; 121, 152; 122, 153; 122, 154; 124, 155; 127, 156; 127, 157; 128, 158; 129, 159; 129, 160; 132, 161; 132, 162; 133, 163; 133, 164; 135, 165; 138, 166; 138, 167; 139, 168; 139, 169; 141, 170; 143, 171; 143, 172; 144, 173; 144, 174; 145, 175; 146, 176; 146, 177; 147, 178; 147, 179; 148, 180; 148, 181; 149, 182; 149, 183; 150, 184; 151, 185; 151, 186; 152, 187; 153, 188; 153, 189; 154, 190; 154, 191; 155, 192; 157, 193; 157, 194; 158, 195; 160, 196; 160, 197; 161, 
198; 161, 199; 162, 200; 162, 201; 163, 202; 163, 203; 164, 204; 164, 205; 165, 206; 166, 207; 166, 208; 167, 209; 168, 210; 168, 211; 169, 212; 169, 213; 170, 214; 171, 215; 175, 216; 175, 217; 178, 218; 179, 219; 179, 220; 181, 221; 183, 222; 185, 223; 185, 224; 189, 225; 191, 226; 194, 227; 195, 228; 195, 229; 200, 230; 201, 231; 201, 232; 203, 233; 205, 234; 207, 235; 207, 236; 209, 237; 211, 238; 213, 239; 216, 240; 216, 241; 217, 242; 217, 243; 219, 244; 219, 245; 223, 246; 223, 247; 229, 248; 231, 249; 231, 250; 235, 251; 235, 252; 240, 253; 240, 254; 243, 255; 244, 256; 244, 257; 246, 258; 246, 259; 247, 260; 248, 261; 248, 262; 249, 263; 249, 264; 251, 265; 251, 266; 256, 267; 256, 268; 257, 269; 258, 270; 258, 271; 261, 272; 262, 273; 263, 274; 263, 275; 264, 276; 265, 277; 265, 278; 266, 279; 267, 280; 267, 281; 274, 282; 274, 283; 277, 284; 277, 285 | def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files |
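A hedged usage sketch; the directory path is a placeholder, not taken from the source:

# Placeholder path to the unpacked GermaNet XML directory.
lex_files, gn_rels_file, wiktionary_files, ili_files = \
    find_germanet_xml_files('/path/to/germanet-xml')
print(len(lex_files), 'lexical files,', len(wiktionary_files), 'wiktionary files')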
0, module; 1, function_definition; 2, function_name:iter_org_issues; 3, parameters; 4, block; 5, identifier:self; 6, identifier:name; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, expression_statement; 16, expression_statement; 17, comment:# issue_params will handle the since parameter; 18, expression_statement; 19, return_statement; 20, identifier:filter; 21, string; 22, identifier:state; 23, string; 24, identifier:labels; 25, string; 26, identifier:sort; 27, string; 28, identifier:direction; 29, string; 30, identifier:since; 31, None; 32, identifier:number; 33, unary_operator; 34, identifier:etag; 35, None; 36, comment:"""Iterate over the organnization's issues if the authenticated user
belongs to it.
:param str name: (required), name of the organization
:param str filter: accepted values:
('assigned', 'created', 'mentioned', 'subscribed')
api-default: 'assigned'
:param str state: accepted values: ('open', 'closed')
api-default: 'open'
:param str labels: comma-separated list of label names, e.g.,
'bug,ui,@high'
:param str sort: accepted values: ('created', 'updated', 'comments')
api-default: created
:param str direction: accepted values: ('asc', 'desc')
api-default: desc
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an ISO8601 formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), number of issues to return. Default:
-1, returns all available issues
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.Issue>`
"""; 37, assignment; 38, assignment; 39, call; 40, integer:1; 41, identifier:url; 42, call; 43, identifier:params; 44, call; 45, attribute; 46, argument_list; 47, attribute; 48, argument_list; 49, identifier:issue_params; 50, argument_list; 51, identifier:self; 52, identifier:_iter; 53, call; 54, identifier:url; 55, identifier:Issue; 56, identifier:params; 57, identifier:etag; 58, identifier:self; 59, identifier:_build_url; 60, string; 61, identifier:name; 62, string; 63, identifier:filter; 64, identifier:state; 65, identifier:labels; 66, identifier:sort; 67, identifier:direction; 68, identifier:since; 69, identifier:int; 70, argument_list; 71, string_content:orgs; 72, string_content:issues; 73, identifier:number | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 7, 20; 7, 21; 8, 22; 8, 23; 9, 24; 9, 25; 10, 26; 10, 27; 11, 28; 11, 29; 12, 30; 12, 31; 13, 32; 13, 33; 14, 34; 14, 35; 15, 36; 16, 37; 18, 38; 19, 39; 33, 40; 37, 41; 37, 42; 38, 43; 38, 44; 39, 45; 39, 46; 42, 47; 42, 48; 44, 49; 44, 50; 45, 51; 45, 52; 46, 53; 46, 54; 46, 55; 46, 56; 46, 57; 47, 58; 47, 59; 48, 60; 48, 61; 48, 62; 50, 63; 50, 64; 50, 65; 50, 66; 50, 67; 50, 68; 53, 69; 53, 70; 60, 71; 62, 72; 70, 73 | def iter_org_issues(self, name, filter='', state='', labels='', sort='',
direction='', since=None, number=-1, etag=None):
"""Iterate over the organnization's issues if the authenticated user
belongs to it.
:param str name: (required), name of the organization
:param str filter: accepted values:
('assigned', 'created', 'mentioned', 'subscribed')
api-default: 'assigned'
:param str state: accepted values: ('open', 'closed')
api-default: 'open'
:param str labels: comma-separated list of label names, e.g.,
'bug,ui,@high'
:param str sort: accepted values: ('created', 'updated', 'comments')
api-default: created
:param str direction: accepted values: ('asc', 'desc')
api-default: desc
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an ISO8601 formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), number of issues to return. Default:
-1, returns all available issues
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.Issue>`
"""
url = self._build_url('orgs', name, 'issues')
# issue_params will handle the since parameter
params = issue_params(filter, state, labels, sort, direction, since)
return self._iter(int(number), url, Issue, params, etag) |
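A hedged usage sketch in the github3.py 0.x style; the login call and organization name are assumptions, not taken from the source:

import github3

gh = github3.login(token='<personal access token>')   # placeholder credential
# Hypothetical organization name; only open issues assigned to the caller.
for issue in gh.iter_org_issues('my-org', filter='assigned', state='open'):
    print(issue.number, issue.title)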
0, module; 1, function_definition; 2, function_name:iter_repos; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, if_statement; 16, if_statement; 17, return_statement; 18, identifier:type; 19, None; 20, identifier:sort; 21, None; 22, identifier:direction; 23, None; 24, identifier:number; 25, unary_operator; 26, identifier:etag; 27, None; 28, comment:"""List public repositories for the authenticated user.
.. versionchanged:: 0.6
Removed the login parameter for correctness. Use iter_user_repos
instead
:param str type: (optional), accepted values:
('all', 'owner', 'public', 'private', 'member')
API default: 'all'
:param str sort: (optional), accepted values:
('created', 'updated', 'pushed', 'full_name')
API default: 'created'
:param str direction: (optional), accepted values:
('asc', 'desc'), API default: 'asc' when using 'full_name',
'desc' otherwise
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
objects
"""; 29, assignment; 30, assignment; 31, comparison_operator:type in ('all', 'owner', 'public', 'private', 'member'); 32, block; 33, comparison_operator:sort in ('created', 'updated', 'pushed', 'full_name'); 34, block; 35, comparison_operator:direction in ('asc', 'desc'); 36, block; 37, call; 38, integer:1; 39, identifier:url; 40, call; 41, identifier:params; 42, dictionary; 43, identifier:type; 44, tuple; 45, expression_statement; 46, identifier:sort; 47, tuple; 48, expression_statement; 49, identifier:direction; 50, tuple; 51, expression_statement; 52, attribute; 53, argument_list; 54, attribute; 55, argument_list; 56, string; 57, string; 58, string; 59, string; 60, string; 61, call; 62, string; 63, string; 64, string; 65, string; 66, call; 67, string; 68, string; 69, call; 70, identifier:self; 71, identifier:_iter; 72, call; 73, identifier:url; 74, identifier:Repository; 75, identifier:params; 76, identifier:etag; 77, identifier:self; 78, identifier:_build_url; 79, string; 80, string; 81, string_content:all; 82, string_content:owner; 83, string_content:public; 84, string_content:private; 85, string_content:member; 86, attribute; 87, argument_list; 88, string_content:created; 89, string_content:updated; 90, string_content:pushed; 91, string_content:full_name; 92, attribute; 93, argument_list; 94, string_content:asc; 95, string_content:desc; 96, attribute; 97, argument_list; 98, identifier:int; 99, argument_list; 100, string_content:user; 101, string_content:repos; 102, identifier:params; 103, identifier:update; 104, keyword_argument; 105, identifier:params; 106, identifier:update; 107, keyword_argument; 108, identifier:params; 109, identifier:update; 110, keyword_argument; 111, identifier:number; 112, identifier:type; 113, identifier:type; 114, identifier:sort; 115, identifier:sort; 116, identifier:direction; 117, identifier:direction | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 6, 18; 6, 19; 7, 20; 7, 21; 8, 22; 8, 23; 9, 24; 9, 25; 10, 26; 10, 27; 11, 28; 12, 29; 13, 30; 14, 31; 14, 32; 15, 33; 15, 34; 16, 35; 16, 36; 17, 37; 25, 38; 29, 39; 29, 40; 30, 41; 30, 42; 31, 43; 31, 44; 32, 45; 33, 46; 33, 47; 34, 48; 35, 49; 35, 50; 36, 51; 37, 52; 37, 53; 40, 54; 40, 55; 44, 56; 44, 57; 44, 58; 44, 59; 44, 60; 45, 61; 47, 62; 47, 63; 47, 64; 47, 65; 48, 66; 50, 67; 50, 68; 51, 69; 52, 70; 52, 71; 53, 72; 53, 73; 53, 74; 53, 75; 53, 76; 54, 77; 54, 78; 55, 79; 55, 80; 56, 81; 57, 82; 58, 83; 59, 84; 60, 85; 61, 86; 61, 87; 62, 88; 63, 89; 64, 90; 65, 91; 66, 92; 66, 93; 67, 94; 68, 95; 69, 96; 69, 97; 72, 98; 72, 99; 79, 100; 80, 101; 86, 102; 86, 103; 87, 104; 92, 105; 92, 106; 93, 107; 96, 108; 96, 109; 97, 110; 99, 111; 104, 112; 104, 113; 107, 114; 107, 115; 110, 116; 110, 117 | def iter_repos(self, type=None, sort=None, direction=None, number=-1,
etag=None):
"""List public repositories for the authenticated user.
.. versionchanged:: 0.6
Removed the login parameter for correctness. Use iter_user_repos
instead
:param str type: (optional), accepted values:
('all', 'owner', 'public', 'private', 'member')
API default: 'all'
:param str sort: (optional), accepted values:
('created', 'updated', 'pushed', 'full_name')
API default: 'created'
:param str direction: (optional), accepted values:
('asc', 'desc'), API default: 'asc' when using 'full_name',
'desc' otherwise
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
objects
"""
url = self._build_url('user', 'repos')
params = {}
if type in ('all', 'owner', 'public', 'private', 'member'):
params.update(type=type)
if sort in ('created', 'updated', 'pushed', 'full_name'):
params.update(sort=sort)
if direction in ('asc', 'desc'):
params.update(direction=direction)
return self._iter(int(number), url, Repository, params, etag) |
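A hedged sketch of listing the authenticated user's own repositories; the client setup mirrors the previous sketch and the parameter choices are illustrative:

import github3

gh = github3.login(token='<personal access token>')   # placeholder credential
# Only repositories owned by the authenticated user, most recently pushed first.
for repo in gh.iter_repos(type='owner', sort='pushed', direction='desc'):
    print(repo.full_name)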
0, module; 1, function_definition; 2, function_name:iter_starred; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, if_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, return_statement; 17, identifier:login; 18, None; 19, identifier:sort; 20, None; 21, identifier:direction; 22, None; 23, identifier:number; 24, unary_operator; 25, identifier:etag; 26, None; 27, comment:"""Iterate over repositories starred by ``login`` or the authenticated
user.
.. versionchanged:: 0.5
Added sort and direction parameters (optional) as per the change in
GitHub's API.
:param str login: (optional), name of user whose stars you want to see
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""; 28, identifier:login; 29, block; 30, assignment; 31, call; 32, assignment; 33, call; 34, integer:1; 35, return_statement; 36, identifier:params; 37, dictionary; 38, attribute; 39, argument_list; 40, identifier:url; 41, call; 42, attribute; 43, argument_list; 44, call; 45, pair; 46, pair; 47, identifier:self; 48, identifier:_remove_none; 49, identifier:params; 50, attribute; 51, argument_list; 52, identifier:self; 53, identifier:_iter; 54, call; 55, identifier:url; 56, identifier:Repository; 57, identifier:params; 58, identifier:etag; 59, attribute; 60, argument_list; 61, string; 62, identifier:sort; 63, string; 64, identifier:direction; 65, identifier:self; 66, identifier:_build_url; 67, string; 68, string; 69, identifier:int; 70, argument_list; 71, call; 72, identifier:iter_starred; 73, identifier:sort; 74, identifier:direction; 75, string_content:sort; 76, string_content:direction; 77, string_content:user; 78, string_content:starred; 79, identifier:number; 80, attribute; 81, argument_list; 82, identifier:self; 83, identifier:user; 84, identifier:login | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 6, 17; 6, 18; 7, 19; 7, 20; 8, 21; 8, 22; 9, 23; 9, 24; 10, 25; 10, 26; 11, 27; 12, 28; 12, 29; 13, 30; 14, 31; 15, 32; 16, 33; 24, 34; 29, 35; 30, 36; 30, 37; 31, 38; 31, 39; 32, 40; 32, 41; 33, 42; 33, 43; 35, 44; 37, 45; 37, 46; 38, 47; 38, 48; 39, 49; 41, 50; 41, 51; 42, 52; 42, 53; 43, 54; 43, 55; 43, 56; 43, 57; 43, 58; 44, 59; 44, 60; 45, 61; 45, 62; 46, 63; 46, 64; 50, 65; 50, 66; 51, 67; 51, 68; 54, 69; 54, 70; 59, 71; 59, 72; 60, 73; 60, 74; 61, 75; 63, 76; 67, 77; 68, 78; 70, 79; 71, 80; 71, 81; 80, 82; 80, 83; 81, 84 | def iter_starred(self, login=None, sort=None, direction=None, number=-1,
etag=None):
"""Iterate over repositories starred by ``login`` or the authenticated
user.
.. versionchanged:: 0.5
Added sort and direction parameters (optional) as per the change in
GitHub's API.
:param str login: (optional), name of user whose stars you want to see
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
if login:
return self.user(login).iter_starred(sort, direction)
params = {'sort': sort, 'direction': direction}
self._remove_none(params)
url = self._build_url('user', 'starred')
return self._iter(int(number), url, Repository, params, etag) |
0, module; 1, function_definition; 2, function_name:search_code; 3, parameters; 4, block; 5, identifier:self; 6, identifier:query; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, if_statement; 17, if_statement; 18, if_statement; 19, expression_statement; 20, return_statement; 21, identifier:sort; 22, None; 23, identifier:order; 24, None; 25, identifier:per_page; 26, None; 27, identifier:text_match; 28, False; 29, identifier:number; 30, unary_operator; 31, identifier:etag; 32, None; 33, comment:"""Find code via the code search API.
The query can contain any combination of the following supported
qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the file contents, the file path, or
both.
- ``language`` Searches code based on the language it’s written in.
- ``fork`` Specifies that code from forked repositories should be
searched. Repository forks will not be searchable unless the fork
has more stars than the parent repository.
- ``size`` Finds files that match a certain size (in bytes).
- ``path`` Specifies the path that the resulting file must be at.
- ``extension`` Matches files with a certain extension.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/-DvAuA
:param str query: (required), a valid query as described above, e.g.,
``addClass in:file language:js repo:jquery/jquery``
:param str sort: (optional), how the results should be sorted;
option(s): ``indexed``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/iRmJxg for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`CodeSearchResult
<github3.search.CodeSearchResult>`
"""; 34, assignment; 35, assignment; 36, comparison_operator:sort == 'indexed'; 37, block; 38, boolean_operator; 39, block; 40, identifier:text_match; 41, block; 42, assignment; 43, call; 44, integer:1; 45, identifier:params; 46, dictionary; 47, identifier:headers; 48, dictionary; 49, identifier:sort; 50, string; 51, expression_statement; 52, identifier:sort; 53, comparison_operator:order in ('asc', 'desc'); 54, expression_statement; 55, expression_statement; 56, identifier:url; 57, call; 58, identifier:SearchIterator; 59, argument_list; 60, pair; 61, string_content:indexed; 62, assignment; 63, identifier:order; 64, tuple; 65, assignment; 66, assignment; 67, attribute; 68, argument_list; 69, identifier:number; 70, identifier:url; 71, identifier:CodeSearchResult; 72, identifier:self; 73, identifier:params; 74, identifier:etag; 75, identifier:headers; 76, string; 77, identifier:query; 78, subscript; 79, identifier:sort; 80, string; 81, string; 82, subscript; 83, identifier:order; 84, identifier:headers; 85, dictionary; 86, identifier:self; 87, identifier:_build_url; 88, string; 89, string; 90, string_content:q; 91, identifier:params; 92, string; 93, string_content:asc; 94, string_content:desc; 95, identifier:params; 96, string; 97, pair; 98, string_content:search; 99, string_content:code; 100, string_content:sort; 101, string_content:order; 102, string; 103, string; 104, string_content:Accept; 105, string_content:application/vnd.github.v3.full.text-match+json | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 7, 21; 7, 22; 8, 23; 8, 24; 9, 25; 9, 26; 10, 27; 10, 28; 11, 29; 11, 30; 12, 31; 12, 32; 13, 33; 14, 34; 15, 35; 16, 36; 16, 37; 17, 38; 17, 39; 18, 40; 18, 41; 19, 42; 20, 43; 30, 44; 34, 45; 34, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 38, 52; 38, 53; 39, 54; 41, 55; 42, 56; 42, 57; 43, 58; 43, 59; 46, 60; 50, 61; 51, 62; 53, 63; 53, 64; 54, 65; 55, 66; 57, 67; 57, 68; 59, 69; 59, 70; 59, 71; 59, 72; 59, 73; 59, 74; 59, 75; 60, 76; 60, 77; 62, 78; 62, 79; 64, 80; 64, 81; 65, 82; 65, 83; 66, 84; 66, 85; 67, 86; 67, 87; 68, 88; 68, 89; 76, 90; 78, 91; 78, 92; 80, 93; 81, 94; 82, 95; 82, 96; 85, 97; 88, 98; 89, 99; 92, 100; 96, 101; 97, 102; 97, 103; 102, 104; 103, 105 | def search_code(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find code via the code search API.
The query can contain any combination of the following supported
qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the file contents, the file path, or
both.
- ``language`` Searches code based on the language it’s written in.
- ``fork`` Specifies that code from forked repositories should be
searched. Repository forks will not be searchable unless the fork
has more stars than the parent repository.
- ``size`` Finds files that match a certain size (in bytes).
- ``path`` Specifies the path that the resulting file must be at.
- ``extension`` Matches files with a certain extension.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/-DvAuA
:param str query: (required), a valid query as described above, e.g.,
``addClass in:file language:js repo:jquery/jquery``
:param str sort: (optional), how the results should be sorted;
option(s): ``indexed``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/iRmJxg for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`CodeSearchResult
<github3.search.CodeSearchResult>`
"""
params = {'q': query}
headers = {}
if sort == 'indexed':
params['sort'] = sort
if sort and order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'code')
return SearchIterator(number, url, CodeSearchResult, self, params,
etag, headers) |
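A hedged sketch reusing the example query from the docstring; anonymous search works but is rate limited, and the ``path`` attribute on the results reflects the underlying API's result fields:

import github3

gh = github3.GitHub()   # anonymous client
for result in gh.search_code('addClass in:file language:js repo:jquery/jquery',
                             number=5):
    print(result.path)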
0, module; 1, function_definition; 2, function_name:search_issues; 3, parameters; 4, block; 5, identifier:self; 6, identifier:query; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, if_statement; 17, if_statement; 18, if_statement; 19, expression_statement; 20, return_statement; 21, identifier:sort; 22, None; 23, identifier:order; 24, None; 25, identifier:per_page; 26, None; 27, identifier:text_match; 28, False; 29, identifier:number; 30, unary_operator; 31, identifier:etag; 32, None; 33, comment:"""Find issues by state and keyword
The query can contain any combination of the following supported
qualifiers:
- ``type`` With this qualifier you can restrict the search to issues
or pull request only.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the title, body, comments, or any
combination of these.
- ``author`` Finds issues created by a certain user.
- ``assignee`` Finds issues that are assigned to a certain user.
- ``mentions`` Finds issues that mention a certain user.
- ``commenter`` Finds issues that a certain user commented on.
- ``involves`` Finds issues that were either created by a certain user,
assigned to that user, mention that user, or were commented on by
that user.
- ``state`` Filter issues based on whether they’re open or closed.
- ``labels`` Filters issues based on their labels.
- ``language`` Searches for issues within repositories that match a
certain language.
- ``created`` or ``updated`` Filters issues based on times of creation,
or when they were last updated.
- ``comments`` Filters issues based on the quantity of comments.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/d1oELA
:param str query: (required), a valid query as described above, e.g.,
``windows label:bug``
:param str sort: (optional), how the results should be sorted;
options: ``created``, ``comments``, ``updated``;
default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/QLQuSQ for more information
:param int number: (optional), number of issues to return.
Default: -1, returns all available issues
:param str etag: (optional), previous ETag header value
:return: generator of :class:`IssueSearchResult
<github3.search.IssueSearchResult>`
"""; 34, assignment; 35, assignment; 36, comparison_operator:sort in ('comments', 'created', 'updated'); 37, block; 38, comparison_operator:order in ('asc', 'desc'); 39, block; 40, identifier:text_match; 41, block; 42, assignment; 43, call; 44, integer:1; 45, identifier:params; 46, dictionary; 47, identifier:headers; 48, dictionary; 49, identifier:sort; 50, tuple; 51, expression_statement; 52, identifier:order; 53, tuple; 54, expression_statement; 55, expression_statement; 56, identifier:url; 57, call; 58, identifier:SearchIterator; 59, argument_list; 60, pair; 61, string; 62, string; 63, string; 64, assignment; 65, string; 66, string; 67, assignment; 68, assignment; 69, attribute; 70, argument_list; 71, identifier:number; 72, identifier:url; 73, identifier:IssueSearchResult; 74, identifier:self; 75, identifier:params; 76, identifier:etag; 77, identifier:headers; 78, string; 79, identifier:query; 80, string_content:comments; 81, string_content:created; 82, string_content:updated; 83, subscript; 84, identifier:sort; 85, string_content:asc; 86, string_content:desc; 87, subscript; 88, identifier:order; 89, identifier:headers; 90, dictionary; 91, identifier:self; 92, identifier:_build_url; 93, string; 94, string; 95, string_content:q; 96, identifier:params; 97, string; 98, identifier:params; 99, string; 100, pair; 101, string_content:search; 102, string_content:issues; 103, string_content:sort; 104, string_content:order; 105, string; 106, string; 107, string_content:Accept; 108, string_content:application/vnd.github.v3.full.text-match+json | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 7, 21; 7, 22; 8, 23; 8, 24; 9, 25; 9, 26; 10, 27; 10, 28; 11, 29; 11, 30; 12, 31; 12, 32; 13, 33; 14, 34; 15, 35; 16, 36; 16, 37; 17, 38; 17, 39; 18, 40; 18, 41; 19, 42; 20, 43; 30, 44; 34, 45; 34, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 38, 52; 38, 53; 39, 54; 41, 55; 42, 56; 42, 57; 43, 58; 43, 59; 46, 60; 50, 61; 50, 62; 50, 63; 51, 64; 53, 65; 53, 66; 54, 67; 55, 68; 57, 69; 57, 70; 59, 71; 59, 72; 59, 73; 59, 74; 59, 75; 59, 76; 59, 77; 60, 78; 60, 79; 61, 80; 62, 81; 63, 82; 64, 83; 64, 84; 65, 85; 66, 86; 67, 87; 67, 88; 68, 89; 68, 90; 69, 91; 69, 92; 70, 93; 70, 94; 78, 95; 83, 96; 83, 97; 87, 98; 87, 99; 90, 100; 93, 101; 94, 102; 97, 103; 99, 104; 100, 105; 100, 106; 105, 107; 106, 108 | def search_issues(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find issues by state and keyword
The query can contain any combination of the following supported
qualifiers:
- ``type`` With this qualifier you can restrict the search to issues
or pull request only.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the title, body, comments, or any
combination of these.
- ``author`` Finds issues created by a certain user.
- ``assignee`` Finds issues that are assigned to a certain user.
- ``mentions`` Finds issues that mention a certain user.
- ``commenter`` Finds issues that a certain user commented on.
- ``involves`` Finds issues that were either created by a certain user,
assigned to that user, mention that user, or were commented on by
that user.
- ``state`` Filter issues based on whether they’re open or closed.
- ``labels`` Filters issues based on their labels.
- ``language`` Searches for issues within repositories that match a
certain language.
- ``created`` or ``updated`` Filters issues based on times of creation,
or when they were last updated.
- ``comments`` Filters issues based on the quantity of comments.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/d1oELA
:param str query: (required), a valid query as described above, e.g.,
``windows label:bug``
:param str sort: (optional), how the results should be sorted;
options: ``created``, ``comments``, ``updated``;
default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/QLQuSQ for more information
:param int number: (optional), number of issues to return.
Default: -1, returns all available issues
:param str etag: (optional), previous ETag header value
:return: generator of :class:`IssueSearchResult
<github3.search.IssueSearchResult>`
"""
params = {'q': query}
headers = {}
if sort in ('comments', 'created', 'updated'):
params['sort'] = sort
if order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'issues')
return SearchIterator(number, url, IssueSearchResult, self, params,
etag, headers) |
0, module; 1, function_definition; 2, function_name:search_users; 3, parameters; 4, block; 5, identifier:self; 6, identifier:query; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, if_statement; 17, if_statement; 18, if_statement; 19, expression_statement; 20, return_statement; 21, identifier:sort; 22, None; 23, identifier:order; 24, None; 25, identifier:per_page; 26, None; 27, identifier:text_match; 28, False; 29, identifier:number; 30, unary_operator; 31, identifier:etag; 32, None; 33, comment:"""Find users via the Search API.
The query can contain any combination of the following supported
qualifiers:
- ``type`` With this qualifier you can restrict the search to just
personal accounts or just organization accounts.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the username, public email, full
name, or any combination of these.
- ``repos`` Filters users based on the number of repositories they
have.
- ``location`` Filter users by the location indicated in their
profile.
- ``language`` Search for users that have repositories that match a
certain language.
- ``created`` Filter users based on when they joined.
- ``followers`` Filter users based on the number of followers they
have.
For more information about these qualifiers see: http://git.io/wjVYJw
:param str query: (required), a valid query as described above, e.g.,
``tom repos:>42 followers:>1000``
:param str sort: (optional), how the results should be sorted;
options: ``followers``, ``repositories``, or ``joined``; default:
best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/_V1zRwa for more information
:param int number: (optional), number of search results to return;
Default: -1 returns all available
:param str etag: (optional), ETag header value of the last request.
:return: generator of :class:`UserSearchResult
<github3.search.UserSearchResult>`
"""; 34, assignment; 35, assignment; 36, comparison_operator:sort in ('followers', 'repositories', 'joined'); 37, block; 38, comparison_operator:order in ('asc', 'desc'); 39, block; 40, identifier:text_match; 41, block; 42, assignment; 43, call; 44, integer:1; 45, identifier:params; 46, dictionary; 47, identifier:headers; 48, dictionary; 49, identifier:sort; 50, tuple; 51, expression_statement; 52, identifier:order; 53, tuple; 54, expression_statement; 55, expression_statement; 56, identifier:url; 57, call; 58, identifier:SearchIterator; 59, argument_list; 60, pair; 61, string; 62, string; 63, string; 64, assignment; 65, string; 66, string; 67, assignment; 68, assignment; 69, attribute; 70, argument_list; 71, identifier:number; 72, identifier:url; 73, identifier:UserSearchResult; 74, identifier:self; 75, identifier:params; 76, identifier:etag; 77, identifier:headers; 78, string; 79, identifier:query; 80, string_content:followers; 81, string_content:repositories; 82, string_content:joined; 83, subscript; 84, identifier:sort; 85, string_content:asc; 86, string_content:desc; 87, subscript; 88, identifier:order; 89, identifier:headers; 90, dictionary; 91, identifier:self; 92, identifier:_build_url; 93, string; 94, string; 95, string_content:q; 96, identifier:params; 97, string; 98, identifier:params; 99, string; 100, pair; 101, string_content:search; 102, string_content:users; 103, string_content:sort; 104, string_content:order; 105, string; 106, string; 107, string_content:Accept; 108, string_content:application/vnd.github.v3.full.text-match+json | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 7, 21; 7, 22; 8, 23; 8, 24; 9, 25; 9, 26; 10, 27; 10, 28; 11, 29; 11, 30; 12, 31; 12, 32; 13, 33; 14, 34; 15, 35; 16, 36; 16, 37; 17, 38; 17, 39; 18, 40; 18, 41; 19, 42; 20, 43; 30, 44; 34, 45; 34, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 38, 52; 38, 53; 39, 54; 41, 55; 42, 56; 42, 57; 43, 58; 43, 59; 46, 60; 50, 61; 50, 62; 50, 63; 51, 64; 53, 65; 53, 66; 54, 67; 55, 68; 57, 69; 57, 70; 59, 71; 59, 72; 59, 73; 59, 74; 59, 75; 59, 76; 59, 77; 60, 78; 60, 79; 61, 80; 62, 81; 63, 82; 64, 83; 64, 84; 65, 85; 66, 86; 67, 87; 67, 88; 68, 89; 68, 90; 69, 91; 69, 92; 70, 93; 70, 94; 78, 95; 83, 96; 83, 97; 87, 98; 87, 99; 90, 100; 93, 101; 94, 102; 97, 103; 99, 104; 100, 105; 100, 106; 105, 107; 106, 108 | def search_users(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find users via the Search API.
The query can contain any combination of the following supported
qualifiers:
- ``type`` With this qualifier you can restrict the search to just
personal accounts or just organization accounts.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the username, public email, full
name, or any combination of these.
- ``repos`` Filters users based on the number of repositories they
have.
- ``location`` Filter users by the location indicated in their
profile.
- ``language`` Search for users that have repositories that match a
certain language.
- ``created`` Filter users based on when they joined.
- ``followers`` Filter users based on the number of followers they
have.
For more information about these qualifiers see: http://git.io/wjVYJw
:param str query: (required), a valid query as described above, e.g.,
``tom repos:>42 followers:>1000``
:param str sort: (optional), how the results should be sorted;
options: ``followers``, ``repositories``, or ``joined``; default:
best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/_V1zRwa for more information
:param int number: (optional), number of search results to return;
Default: -1 returns all available
:param str etag: (optional), ETag header value of the last request.
:return: generator of :class:`UserSearchResult
<github3.search.UserSearchResult>`
"""
params = {'q': query}
headers = {}
if sort in ('followers', 'repositories', 'joined'):
params['sort'] = sort
if order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'users')
return SearchIterator(number, url, UserSearchResult, self, params,
etag, headers) |
0, module; 1, function_definition; 2, function_name:iter_starred; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, import_from_statement; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, return_statement; 16, identifier:sort; 17, None; 18, identifier:direction; 19, None; 20, identifier:number; 21, unary_operator; 22, identifier:etag; 23, None; 24, comment:"""Iterate over repositories starred by this user.
.. versionchanged:: 0.5
Added sort and direction parameters (optional) as per the change in
GitHub's API.
:param int number: (optional), number of starred repos to return.
Default: -1, returns all available repos
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""; 25, relative_import; 26, dotted_name; 27, assignment; 28, call; 29, assignment; 30, call; 31, integer:1; 32, import_prefix; 33, dotted_name; 34, identifier:Repository; 35, identifier:params; 36, dictionary; 37, attribute; 38, argument_list; 39, identifier:url; 40, call; 41, attribute; 42, argument_list; 43, identifier:repos; 44, pair; 45, pair; 46, identifier:self; 47, identifier:_remove_none; 48, identifier:params; 49, attribute; 50, argument_list; 51, identifier:self; 52, identifier:_iter; 53, call; 54, identifier:url; 55, identifier:Repository; 56, identifier:params; 57, identifier:etag; 58, string; 59, identifier:sort; 60, string; 61, identifier:direction; 62, attribute; 63, identifier:expand; 64, keyword_argument; 65, keyword_argument; 66, identifier:int; 67, argument_list; 68, string_content:sort; 69, string_content:direction; 70, identifier:self; 71, identifier:starred_urlt; 72, identifier:owner; 73, None; 74, identifier:repo; 75, None; 76, identifier:number | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 6, 16; 6, 17; 7, 18; 7, 19; 8, 20; 8, 21; 9, 22; 9, 23; 10, 24; 11, 25; 11, 26; 12, 27; 13, 28; 14, 29; 15, 30; 21, 31; 25, 32; 25, 33; 26, 34; 27, 35; 27, 36; 28, 37; 28, 38; 29, 39; 29, 40; 30, 41; 30, 42; 33, 43; 36, 44; 36, 45; 37, 46; 37, 47; 38, 48; 40, 49; 40, 50; 41, 51; 41, 52; 42, 53; 42, 54; 42, 55; 42, 56; 42, 57; 44, 58; 44, 59; 45, 60; 45, 61; 49, 62; 49, 63; 50, 64; 50, 65; 53, 66; 53, 67; 58, 68; 60, 69; 62, 70; 62, 71; 64, 72; 64, 73; 65, 74; 65, 75; 67, 76 | def iter_starred(self, sort=None, direction=None, number=-1, etag=None):
"""Iterate over repositories starred by this user.
.. versionchanged:: 0.5
Added sort and direction parameters (optional) as per the change in
GitHub's API.
:param int number: (optional), number of starred repos to return.
Default: -1, returns all available repos
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
from .repos import Repository
params = {'sort': sort, 'direction': direction}
self._remove_none(params)
url = self.starred_urlt.expand(owner=None, repo=None)
return self._iter(int(number), url, Repository, params, etag) |
0, module; 1, function_definition; 2, function_name:iter_forks; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, if_statement; 13, return_statement; 14, identifier:sort; 15, string; 16, identifier:number; 17, unary_operator; 18, identifier:etag; 19, None; 20, comment:"""Iterate over forks of this repository.
:param str sort: (optional), accepted values:
('newest', 'oldest', 'watchers'), API default: 'newest'
:param int number: (optional), number of forks to return. Default: -1
returns all forks
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <Repository>`
"""; 21, assignment; 22, assignment; 23, comparison_operator:sort in ('newest', 'oldest', 'watchers'); 24, block; 25, call; 26, integer:1; 27, identifier:url; 28, call; 29, identifier:params; 30, dictionary; 31, identifier:sort; 32, tuple; 33, expression_statement; 34, attribute; 35, argument_list; 36, attribute; 37, argument_list; 38, string; 39, string; 40, string; 41, assignment; 42, identifier:self; 43, identifier:_iter; 44, call; 45, identifier:url; 46, identifier:Repository; 47, identifier:params; 48, identifier:etag; 49, identifier:self; 50, identifier:_build_url; 51, string; 52, keyword_argument; 53, string_content:newest; 54, string_content:oldest; 55, string_content:watchers; 56, identifier:params; 57, dictionary; 58, identifier:int; 59, argument_list; 60, string_content:forks; 61, identifier:base_url; 62, attribute; 63, pair; 64, identifier:number; 65, identifier:self; 66, identifier:_api; 67, string; 68, identifier:sort; 69, string_content:sort | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 6, 14; 6, 15; 7, 16; 7, 17; 8, 18; 8, 19; 9, 20; 10, 21; 11, 22; 12, 23; 12, 24; 13, 25; 17, 26; 21, 27; 21, 28; 22, 29; 22, 30; 23, 31; 23, 32; 24, 33; 25, 34; 25, 35; 28, 36; 28, 37; 32, 38; 32, 39; 32, 40; 33, 41; 34, 42; 34, 43; 35, 44; 35, 45; 35, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 37, 52; 38, 53; 39, 54; 40, 55; 41, 56; 41, 57; 44, 58; 44, 59; 51, 60; 52, 61; 52, 62; 57, 63; 59, 64; 62, 65; 62, 66; 63, 67; 63, 68; 67, 69 | def iter_forks(self, sort='', number=-1, etag=None):
"""Iterate over forks of this repository.
:param str sort: (optional), accepted values:
('newest', 'oldest', 'watchers'), API default: 'newest'
:param int number: (optional), number of forks to return. Default: -1
returns all forks
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <Repository>`
"""
url = self._build_url('forks', base_url=self._api)
params = {}
if sort in ('newest', 'oldest', 'watchers'):
params = {'sort': sort}
return self._iter(int(number), url, Repository, params, etag) |
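Continuing the hedged github3 sketch above (reusing the same gh client), iter_forks can be exercised as below; the repository coordinates are illustrative, and a sort value outside ('newest', 'oldest', 'watchers') is simply left out of the request parameters.

repo = gh.repository('kennethreitz', 'requests')   # illustrative repository
for fork in repo.iter_forks(sort='watchers', number=5):
    print(fork.full_name)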
0, module; 1, function_definition; 2, function_name:iter_issues; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, default_parameter; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, if_statement; 20, expression_statement; 21, expression_statement; 22, return_statement; 23, identifier:milestone; 24, None; 25, identifier:state; 26, None; 27, identifier:assignee; 28, None; 29, identifier:mentioned; 30, None; 31, identifier:labels; 32, None; 33, identifier:sort; 34, None; 35, identifier:direction; 36, None; 37, identifier:since; 38, None; 39, identifier:number; 40, unary_operator; 41, identifier:etag; 42, None; 43, comment:"""Iterate over issues on this repo based upon parameters passed.
.. versionchanged:: 0.9.0
The ``state`` parameter now accepts 'all' in addition to 'open'
and 'closed'.
:param int milestone: (optional), 'none', or '*'
:param str state: (optional), accepted values: ('all', 'open',
'closed')
:param str assignee: (optional), 'none', '*', or login name
:param str mentioned: (optional), user's login name
:param str labels: (optional), comma-separated list of labels, e.g.
'bug,ui,@high'
:param sort: (optional), accepted values:
('created', 'updated', 'comments')
:param str direction: (optional), accepted values: ('asc', 'desc')
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an `ISO8601` formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), Number of issues to return.
By default all issues are returned
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.issue.Issue>`\ s
"""; 44, assignment; 45, assignment; 46, boolean_operator; 47, block; 48, call; 49, call; 50, call; 51, integer:1; 52, identifier:url; 53, call; 54, identifier:params; 55, dictionary; 56, comparison_operator:milestone in ('*', 'none'); 57, call; 58, expression_statement; 59, attribute; 60, argument_list; 61, attribute; 62, argument_list; 63, attribute; 64, argument_list; 65, attribute; 66, argument_list; 67, pair; 68, pair; 69, identifier:milestone; 70, tuple; 71, identifier:isinstance; 72, argument_list; 73, assignment; 74, identifier:self; 75, identifier:_remove_none; 76, identifier:params; 77, identifier:params; 78, identifier:update; 79, call; 80, identifier:self; 81, identifier:_iter; 82, call; 83, identifier:url; 84, identifier:Issue; 85, identifier:params; 86, identifier:etag; 87, identifier:self; 88, identifier:_build_url; 89, string; 90, keyword_argument; 91, string; 92, identifier:assignee; 93, string; 94, identifier:mentioned; 95, string; 96, string; 97, identifier:milestone; 98, identifier:int; 99, subscript; 100, identifier:milestone; 101, identifier:issue_params; 102, argument_list; 103, identifier:int; 104, argument_list; 105, string_content:issues; 106, identifier:base_url; 107, attribute; 108, string_content:assignee; 109, string_content:mentioned; 110, string_content:*; 111, string_content:none; 112, identifier:params; 113, string; 114, None; 115, identifier:state; 116, identifier:labels; 117, identifier:sort; 118, identifier:direction; 119, identifier:since; 120, identifier:number; 121, identifier:self; 122, identifier:_api; 123, string_content:milestone | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 3, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 6, 23; 6, 24; 7, 25; 7, 26; 8, 27; 8, 28; 9, 29; 9, 30; 10, 31; 10, 32; 11, 33; 11, 34; 12, 35; 12, 36; 13, 37; 13, 38; 14, 39; 14, 40; 15, 41; 15, 42; 16, 43; 17, 44; 18, 45; 19, 46; 19, 47; 20, 48; 21, 49; 22, 50; 40, 51; 44, 52; 44, 53; 45, 54; 45, 55; 46, 56; 46, 57; 47, 58; 48, 59; 48, 60; 49, 61; 49, 62; 50, 63; 50, 64; 53, 65; 53, 66; 55, 67; 55, 68; 56, 69; 56, 70; 57, 71; 57, 72; 58, 73; 59, 74; 59, 75; 60, 76; 61, 77; 61, 78; 62, 79; 63, 80; 63, 81; 64, 82; 64, 83; 64, 84; 64, 85; 64, 86; 65, 87; 65, 88; 66, 89; 66, 90; 67, 91; 67, 92; 68, 93; 68, 94; 70, 95; 70, 96; 72, 97; 72, 98; 73, 99; 73, 100; 79, 101; 79, 102; 82, 103; 82, 104; 89, 105; 90, 106; 90, 107; 91, 108; 93, 109; 95, 110; 96, 111; 99, 112; 99, 113; 102, 114; 102, 115; 102, 116; 102, 117; 102, 118; 102, 119; 104, 120; 107, 121; 107, 122; 113, 123 | def iter_issues(self,
milestone=None,
state=None,
assignee=None,
mentioned=None,
labels=None,
sort=None,
direction=None,
since=None,
number=-1,
etag=None):
"""Iterate over issues on this repo based upon parameters passed.
.. versionchanged:: 0.9.0
The ``state`` parameter now accepts 'all' in addition to 'open'
and 'closed'.
:param int milestone: (optional), 'none', or '*'
:param str state: (optional), accepted values: ('all', 'open',
'closed')
:param str assignee: (optional), 'none', '*', or login name
:param str mentioned: (optional), user's login name
:param str labels: (optional), comma-separated list of labels, e.g.
'bug,ui,@high'
:param sort: (optional), accepted values:
('created', 'updated', 'comments')
:param str direction: (optional), accepted values: ('asc', 'desc')
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an `ISO8601` formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), Number of issues to return.
By default all issues are returned
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.issue.Issue>`\ s
"""
url = self._build_url('issues', base_url=self._api)
params = {'assignee': assignee, 'mentioned': mentioned}
if milestone in ('*', 'none') or isinstance(milestone, int):
params['milestone'] = milestone
self._remove_none(params)
params.update(
issue_params(None, state, labels, sort, direction,
since)
)
return self._iter(int(number), url, Issue, params, etag) |
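A hedged sketch of filtering issues with the method above, again reusing the illustrative repo object; note that milestone may be '*', 'none', or an integer milestone number, exactly as the isinstance check in the code allows.

for issue in repo.iter_issues(state='open', labels='bug', sort='updated',
                              direction='desc', number=20):
    print(issue.number, issue.title)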
0, module; 1, function_definition; 2, function_name:iter_milestones; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, for_statement; 16, if_statement; 17, return_statement; 18, identifier:state; 19, None; 20, identifier:sort; 21, None; 22, identifier:direction; 23, None; 24, identifier:number; 25, unary_operator; 26, identifier:etag; 27, None; 28, comment:"""Iterates over the milestones on this repository.
:param str state: (optional), state of the milestones, accepted
values: ('open', 'closed')
:param str sort: (optional), how to sort the milestones, accepted
values: ('due_date', 'completeness')
:param str direction: (optional), direction to sort the milestones,
accepted values: ('asc', 'desc')
:param int number: (optional), number of milestones to return.
Default: -1 returns all milestones
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`Milestone <github3.issues.milestone.Milestone>`\ s
"""; 29, assignment; 30, assignment; 31, assignment; 32, tuple_pattern; 33, call; 34, block; 35, not_operator; 36, block; 37, call; 38, integer:1; 39, identifier:url; 40, call; 41, identifier:accepted; 42, dictionary; 43, identifier:params; 44, dictionary; 45, identifier:k; 46, identifier:v; 47, identifier:list; 48, argument_list; 49, if_statement; 50, identifier:params; 51, expression_statement; 52, attribute; 53, argument_list; 54, attribute; 55, argument_list; 56, pair; 57, pair; 58, pair; 59, pair; 60, pair; 61, pair; 62, call; 63, not_operator; 64, comment:# e.g., '' or None; 65, block; 66, assignment; 67, identifier:self; 68, identifier:_iter; 69, call; 70, identifier:url; 71, identifier:Milestone; 72, identifier:params; 73, identifier:etag; 74, identifier:self; 75, identifier:_build_url; 76, string; 77, keyword_argument; 78, string; 79, tuple; 80, string; 81, tuple; 82, string; 83, tuple; 84, string; 85, identifier:state; 86, string; 87, identifier:sort; 88, string; 89, identifier:direction; 90, attribute; 91, argument_list; 92, parenthesized_expression; 93, delete_statement; 94, identifier:params; 95, None; 96, identifier:int; 97, argument_list; 98, string_content:milestones; 99, identifier:base_url; 100, attribute; 101, string_content:state; 102, string; 103, string; 104, string_content:sort; 105, string; 106, string; 107, string_content:direction; 108, string; 109, string; 110, string_content:state; 111, string_content:sort; 112, string_content:direction; 113, identifier:params; 114, identifier:items; 115, boolean_operator; 116, subscript; 117, identifier:number; 118, identifier:self; 119, identifier:_api; 120, string_content:open; 121, string_content:closed; 122, string_content:due_date; 123, string_content:completeness; 124, string_content:asc; 125, string_content:desc; 126, identifier:v; 127, parenthesized_expression; 128, identifier:params; 129, identifier:k; 130, comparison_operator:v in accepted[k]; 131, identifier:v; 132, subscript; 133, identifier:accepted; 134, identifier:k | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 6, 18; 6, 19; 7, 20; 7, 21; 8, 22; 8, 23; 9, 24; 9, 25; 10, 26; 10, 27; 11, 28; 12, 29; 13, 30; 14, 31; 15, 32; 15, 33; 15, 34; 16, 35; 16, 36; 17, 37; 25, 38; 29, 39; 29, 40; 30, 41; 30, 42; 31, 43; 31, 44; 32, 45; 32, 46; 33, 47; 33, 48; 34, 49; 35, 50; 36, 51; 37, 52; 37, 53; 40, 54; 40, 55; 42, 56; 42, 57; 42, 58; 44, 59; 44, 60; 44, 61; 48, 62; 49, 63; 49, 64; 49, 65; 51, 66; 52, 67; 52, 68; 53, 69; 53, 70; 53, 71; 53, 72; 53, 73; 54, 74; 54, 75; 55, 76; 55, 77; 56, 78; 56, 79; 57, 80; 57, 81; 58, 82; 58, 83; 59, 84; 59, 85; 60, 86; 60, 87; 61, 88; 61, 89; 62, 90; 62, 91; 63, 92; 65, 93; 66, 94; 66, 95; 69, 96; 69, 97; 76, 98; 77, 99; 77, 100; 78, 101; 79, 102; 79, 103; 80, 104; 81, 105; 81, 106; 82, 107; 83, 108; 83, 109; 84, 110; 86, 111; 88, 112; 90, 113; 90, 114; 92, 115; 93, 116; 97, 117; 100, 118; 100, 119; 102, 120; 103, 121; 105, 122; 106, 123; 108, 124; 109, 125; 115, 126; 115, 127; 116, 128; 116, 129; 127, 130; 130, 131; 130, 132; 132, 133; 132, 134 | def iter_milestones(self, state=None, sort=None, direction=None,
number=-1, etag=None):
"""Iterates over the milestones on this repository.
:param str state: (optional), state of the milestones, accepted
values: ('open', 'closed')
:param str sort: (optional), how to sort the milestones, accepted
values: ('due_date', 'completeness')
:param str direction: (optional), direction to sort the milestones,
accepted values: ('asc', 'desc')
:param int number: (optional), number of milestones to return.
Default: -1 returns all milestones
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`Milestone <github3.issues.milestone.Milestone>`\ s
"""
url = self._build_url('milestones', base_url=self._api)
accepted = {'state': ('open', 'closed'),
'sort': ('due_date', 'completeness'),
'direction': ('asc', 'desc')}
params = {'state': state, 'sort': sort, 'direction': direction}
for (k, v) in list(params.items()):
if not (v and (v in accepted[k])): # e.g., '' or None
del params[k]
if not params:
params = None
return self._iter(int(number), url, Milestone, params, etag) |
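Because iter_milestones silently drops any value outside its accepted tuples, an invalid direction falls back to the API default; the sketch below (reusing the illustrative repo) demonstrates that behaviour.

# 'ascending' is not in accepted['direction'], so the method removes it and
# the server-side default ordering applies.
for milestone in repo.iter_milestones(state='open', sort='due_date',
                                      direction='ascending'):
    print(milestone.title, milestone.due_on)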
0, module; 1, function_definition; 2, function_name:iter_pulls; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, if_statement; 17, expression_statement; 18, expression_statement; 19, return_statement; 20, identifier:state; 21, None; 22, identifier:head; 23, None; 24, identifier:base; 25, None; 26, identifier:sort; 27, string; 28, identifier:direction; 29, string; 30, identifier:number; 31, unary_operator; 32, identifier:etag; 33, None; 34, comment:"""List pull requests on repository.
.. versionchanged:: 0.9.0
- The ``state`` parameter now accepts 'all' in addition to 'open'
and 'closed'.
- The ``sort`` parameter was added.
- The ``direction`` parameter was added.
:param str state: (optional), accepted values: ('all', 'open',
'closed')
:param str head: (optional), filters pulls by head user and branch
name in the format ``user:ref-name``, e.g., ``seveas:debian``
:param str base: (optional), filter pulls by base branch name.
Example: ``develop``.
:param str sort: (optional), Sort pull requests by ``created``,
``updated``, ``popularity``, ``long-running``. Default: 'created'
:param str direction: (optional), Choose the direction to list pull
requests. Accepted values: ('desc', 'asc'). Default: 'desc'
:param int number: (optional), number of pulls to return. Default: -1
returns all available pull requests
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`PullRequest <github3.pulls.PullRequest>`\ s
"""; 35, assignment; 36, assignment; 37, boolean_operator; 38, block; 39, call; 40, call; 41, call; 42, string_content:created; 43, string_content:desc; 44, integer:1; 45, identifier:url; 46, call; 47, identifier:params; 48, dictionary; 49, identifier:state; 50, comparison_operator:state.lower() in ('all', 'open', 'closed'); 51, expression_statement; 52, attribute; 53, argument_list; 54, attribute; 55, argument_list; 56, attribute; 57, argument_list; 58, attribute; 59, argument_list; 60, call; 61, tuple; 62, assignment; 63, identifier:params; 64, identifier:update; 65, keyword_argument; 66, keyword_argument; 67, keyword_argument; 68, keyword_argument; 69, identifier:self; 70, identifier:_remove_none; 71, identifier:params; 72, identifier:self; 73, identifier:_iter; 74, call; 75, identifier:url; 76, identifier:PullRequest; 77, identifier:params; 78, identifier:etag; 79, identifier:self; 80, identifier:_build_url; 81, string; 82, keyword_argument; 83, attribute; 84, argument_list; 85, string; 86, string; 87, string; 88, subscript; 89, call; 90, identifier:head; 91, identifier:head; 92, identifier:base; 93, identifier:base; 94, identifier:sort; 95, identifier:sort; 96, identifier:direction; 97, identifier:direction; 98, identifier:int; 99, argument_list; 100, string_content:pulls; 101, identifier:base_url; 102, attribute; 103, identifier:state; 104, identifier:lower; 105, string_content:all; 106, string_content:open; 107, string_content:closed; 108, identifier:params; 109, string; 110, attribute; 111, argument_list; 112, identifier:number; 113, identifier:self; 114, identifier:_api; 115, string_content:state; 116, identifier:state; 117, identifier:lower | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 6, 20; 6, 21; 7, 22; 7, 23; 8, 24; 8, 25; 9, 26; 9, 27; 10, 28; 10, 29; 11, 30; 11, 31; 12, 32; 12, 33; 13, 34; 14, 35; 15, 36; 16, 37; 16, 38; 17, 39; 18, 40; 19, 41; 27, 42; 29, 43; 31, 44; 35, 45; 35, 46; 36, 47; 36, 48; 37, 49; 37, 50; 38, 51; 39, 52; 39, 53; 40, 54; 40, 55; 41, 56; 41, 57; 46, 58; 46, 59; 50, 60; 50, 61; 51, 62; 52, 63; 52, 64; 53, 65; 53, 66; 53, 67; 53, 68; 54, 69; 54, 70; 55, 71; 56, 72; 56, 73; 57, 74; 57, 75; 57, 76; 57, 77; 57, 78; 58, 79; 58, 80; 59, 81; 59, 82; 60, 83; 60, 84; 61, 85; 61, 86; 61, 87; 62, 88; 62, 89; 65, 90; 65, 91; 66, 92; 66, 93; 67, 94; 67, 95; 68, 96; 68, 97; 74, 98; 74, 99; 81, 100; 82, 101; 82, 102; 83, 103; 83, 104; 85, 105; 86, 106; 87, 107; 88, 108; 88, 109; 89, 110; 89, 111; 99, 112; 102, 113; 102, 114; 109, 115; 110, 116; 110, 117 | def iter_pulls(self, state=None, head=None, base=None, sort='created',
direction='desc', number=-1, etag=None):
"""List pull requests on repository.
.. versionchanged:: 0.9.0
- The ``state`` parameter now accepts 'all' in addition to 'open'
and 'closed'.
- The ``sort`` parameter was added.
- The ``direction`` parameter was added.
:param str state: (optional), accepted values: ('all', 'open',
'closed')
:param str head: (optional), filters pulls by head user and branch
name in the format ``user:ref-name``, e.g., ``seveas:debian``
:param str base: (optional), filter pulls by base branch name.
Example: ``develop``.
:param str sort: (optional), Sort pull requests by ``created``,
``updated``, ``popularity``, ``long-running``. Default: 'created'
:param str direction: (optional), Choose the direction to list pull
requests. Accepted values: ('desc', 'asc'). Default: 'desc'
:param int number: (optional), number of pulls to return. Default: -1
returns all available pull requests
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`PullRequest <github3.pulls.PullRequest>`\ s
"""
url = self._build_url('pulls', base_url=self._api)
params = {}
if state and state.lower() in ('all', 'open', 'closed'):
params['state'] = state.lower()
params.update(head=head, base=base, sort=sort, direction=direction)
self._remove_none(params)
return self._iter(int(number), url, PullRequest, params, etag) |
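A short, hedged sketch of listing open pull requests against a base branch with the method above; the branch name is illustrative.

for pull in repo.iter_pulls(state='open', base='master', sort='updated',
                            direction='asc', number=10):
    print(pull.number, pull.title)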
0, module; 1, function_definition; 2, function_name:iter_user_repos; 3, parameters; 4, block; 5, identifier:login; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, if_statement; 13, return_statement; 14, identifier:type; 15, None; 16, identifier:sort; 17, None; 18, identifier:direction; 19, None; 20, identifier:number; 21, unary_operator; 22, identifier:etag; 23, None; 24, comment:"""List public repositories for the specified ``login``.
.. versionadded:: 0.6
.. note:: This replaces github3.iter_repos
:param str login: (required)
:param str type: (optional), accepted values:
('all', 'owner', 'member')
API default: 'all'
:param str sort: (optional), accepted values:
('created', 'updated', 'pushed', 'full_name')
API default: 'created'
:param str direction: (optional), accepted values:
('asc', 'desc'), API default: 'asc' when using 'full_name',
'desc' otherwise
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
objects
"""; 25, identifier:login; 26, block; 27, call; 28, integer:1; 29, return_statement; 30, identifier:iter; 31, argument_list; 32, call; 33, list; 34, attribute; 35, argument_list; 36, identifier:gh; 37, identifier:iter_user_repos; 38, identifier:login; 39, identifier:type; 40, identifier:sort; 41, identifier:direction; 42, identifier:number; 43, identifier:etag | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 6, 14; 6, 15; 7, 16; 7, 17; 8, 18; 8, 19; 9, 20; 9, 21; 10, 22; 10, 23; 11, 24; 12, 25; 12, 26; 13, 27; 21, 28; 26, 29; 27, 30; 27, 31; 29, 32; 31, 33; 32, 34; 32, 35; 34, 36; 34, 37; 35, 38; 35, 39; 35, 40; 35, 41; 35, 42; 35, 43 | def iter_user_repos(login, type=None, sort=None, direction=None, number=-1,
etag=None):
"""List public repositories for the specified ``login``.
.. versionadded:: 0.6
.. note:: This replaces github3.iter_repos
:param str login: (required)
:param str type: (optional), accepted values:
('all', 'owner', 'member')
API default: 'all'
:param str sort: (optional), accepted values:
('created', 'updated', 'pushed', 'full_name')
API default: 'created'
:param str direction: (optional), accepted values:
('asc', 'desc'), API default: 'asc' when using 'full_name',
'desc' otherwise
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
objects
"""
if login:
return gh.iter_user_repos(login, type, sort, direction, number, etag)
return iter([]) |
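Since the docstring's 0.6 note says this helper replaces github3.iter_repos, it is presumably exposed at the package level; a hedged sketch with an illustrative login, plus a reminder that a falsy login short-circuits to an empty iterator.

import github3

for repo in github3.iter_user_repos('octocat', type='owner', sort='pushed',
                                    number=10):
    print(repo.name)

assert list(github3.iter_user_repos('')) == []   # empty login -> empty iterator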
0, module; 1, function_definition; 2, function_name:sort_descendants; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, identifier:attr; 11, string:"name"; 12, comment:"""
This function sorts the branches of a given tree by
considering node names. After the tree is sorted, nodes are
labeled using ascending numbers. This can be used to ensure
that nodes in a tree with the same node names are always
labeled in the same way. Note that if duplicated names are
present, extra criteria should be added to sort nodes.
Unique id is stored as a node._nid attribute
"""; 13, assignment; 14, identifier:n; 15, call; 16, block; 17, identifier:node2content; 18, call; 19, attribute; 20, argument_list; 21, if_statement; 22, attribute; 23, argument_list; 24, identifier:self; 25, identifier:traverse; 26, not_operator; 27, block; 28, identifier:self; 29, identifier:get_cached_content; 30, keyword_argument; 31, keyword_argument; 32, call; 33, expression_statement; 34, identifier:store_attr; 35, identifier:attr; 36, identifier:container_type; 37, identifier:list; 38, attribute; 39, argument_list; 40, call; 41, identifier:n; 42, identifier:is_leaf; 43, attribute; 44, argument_list; 45, attribute; 46, identifier:sort; 47, keyword_argument; 48, identifier:n; 49, identifier:children; 50, identifier:key; 51, lambda; 52, lambda_parameters; 53, call; 54, identifier:x; 55, identifier:str; 56, argument_list; 57, call; 58, identifier:sorted; 59, argument_list; 60, subscript; 61, identifier:node2content; 62, identifier:x | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 6, 10; 6, 11; 7, 12; 8, 13; 9, 14; 9, 15; 9, 16; 13, 17; 13, 18; 15, 19; 15, 20; 16, 21; 18, 22; 18, 23; 19, 24; 19, 25; 21, 26; 21, 27; 22, 28; 22, 29; 23, 30; 23, 31; 26, 32; 27, 33; 30, 34; 30, 35; 31, 36; 31, 37; 32, 38; 32, 39; 33, 40; 38, 41; 38, 42; 40, 43; 40, 44; 43, 45; 43, 46; 44, 47; 45, 48; 45, 49; 47, 50; 47, 51; 51, 52; 51, 53; 52, 54; 53, 55; 53, 56; 56, 57; 57, 58; 57, 59; 59, 60; 60, 61; 60, 62 | def sort_descendants(self, attr="name"):
"""
This function sorts the branches of a given tree by
considering node names. After the tree is sorted, nodes are
labeled using ascending numbers. This can be used to ensure
that nodes in a tree with the same node names are always
labeled in the same way. Note that if duplicated names are
present, extra criteria should be added to sort nodes.
Unique id is stored as a node._nid attribute
"""
node2content = self.get_cached_content(store_attr=attr, container_type=list)
for n in self.traverse():
if not n.is_leaf():
n.children.sort(key=lambda x: str(sorted(node2content[x]))) |
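Assuming the method above belongs to an ete3-style TreeNode, a small sketch of why it is useful: two newick strings with the same topology but different child order serialize identically after sorting.

from ete3 import Tree   # assumption: sort_descendants() is an ete3 TreeNode method

t1 = Tree('((D,C),(B,A));')
t2 = Tree('((A,B),(C,D));')
t1.sort_descendants()
t2.sort_descendants()
print(t1.write(format=9) == t2.write(format=9))   # expected: True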
0, module; 1, function_definition; 2, function_name:collect_backups; 3, parameters; 4, block; 5, identifier:self; 6, identifier:location; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, for_statement; 13, if_statement; 14, return_statement; 15, comment:"""
Collect the backups at the given location.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: A sorted :class:`list` of :class:`Backup` objects (the
backups are sorted by their date).
:raises: :exc:`~exceptions.ValueError` when the given directory doesn't
exist or isn't readable.
"""; 16, assignment; 17, assignment; 18, call; 19, call; 20, identifier:entry; 21, call; 22, block; 23, identifier:backups; 24, block; 25, call; 26, identifier:backups; 27, list; 28, identifier:location; 29, call; 30, attribute; 31, argument_list; 32, attribute; 33, argument_list; 34, identifier:natsort; 35, argument_list; 36, expression_statement; 37, if_statement; 38, expression_statement; 39, identifier:sorted; 40, argument_list; 41, identifier:coerce_location; 42, argument_list; 43, identifier:logger; 44, identifier:info; 45, string:"Scanning %s for backups .."; 46, identifier:location; 47, identifier:location; 48, identifier:ensure_readable; 49, call; 50, assignment; 51, identifier:match; 52, block; 53, else_clause; 54, call; 55, identifier:backups; 56, identifier:location; 57, attribute; 58, argument_list; 59, identifier:match; 60, call; 61, if_statement; 62, block; 63, attribute; 64, argument_list; 65, attribute; 66, identifier:list_entries; 67, attribute; 68, attribute; 69, argument_list; 70, boolean_operator; 71, block; 72, elif_clause; 73, else_clause; 74, expression_statement; 75, identifier:logger; 76, identifier:info; 77, string:"Found %i timestamped backups in %s."; 78, call; 79, identifier:location; 80, identifier:location; 81, identifier:context; 82, identifier:location; 83, identifier:directory; 84, identifier:TIMESTAMP_PATTERN; 85, identifier:search; 86, identifier:entry; 87, attribute; 88, call; 89, expression_statement; 90, boolean_operator; 91, block; 92, block; 93, call; 94, identifier:len; 95, argument_list; 96, identifier:self; 97, identifier:exclude_list; 98, identifier:any; 99, generator_expression; 100, call; 101, attribute; 102, not_operator; 103, expression_statement; 104, try_statement; 105, attribute; 106, argument_list; 107, identifier:backups; 108, call; 109, for_in_clause; 110, attribute; 111, argument_list; 112, identifier:self; 113, identifier:include_list; 114, call; 115, call; 116, block; 117, except_clause; 118, identifier:logger; 119, identifier:debug; 120, string:"Failed to match time stamp in filename: %s"; 121, identifier:entry; 122, attribute; 123, argument_list; 124, identifier:p; 125, attribute; 126, identifier:logger; 127, identifier:verbose; 128, string:"Excluded %s (it matched the exclude list)."; 129, identifier:entry; 130, identifier:any; 131, generator_expression; 132, attribute; 133, argument_list; 134, expression_statement; 135, as_pattern; 136, block; 137, identifier:fnmatch; 138, identifier:fnmatch; 139, identifier:entry; 140, identifier:p; 141, identifier:self; 142, identifier:exclude_list; 143, call; 144, for_in_clause; 145, identifier:logger; 146, identifier:verbose; 147, string:"Excluded %s (it didn't match the include list)."; 148, identifier:entry; 149, call; 150, identifier:ValueError; 151, as_pattern_target; 152, expression_statement; 153, attribute; 154, argument_list; 155, identifier:p; 156, attribute; 157, attribute; 158, argument_list; 159, identifier:e; 160, call; 161, identifier:fnmatch; 162, identifier:fnmatch; 163, identifier:entry; 164, identifier:p; 165, identifier:self; 166, identifier:include_list; 167, identifier:backups; 168, identifier:append; 169, call; 170, attribute; 171, argument_list; 172, identifier:Backup; 173, argument_list; 174, identifier:logger; 175, identifier:notice; 176, string:"Ignoring %s due to invalid date (%s)."; 177, identifier:entry; 178, identifier:e; 179, keyword_argument; 180, keyword_argument; 181, identifier:pathname; 182, call; 183, identifier:timestamp; 184, call; 185, attribute; 186, 
argument_list; 187, attribute; 188, argument_list; 189, attribute; 190, identifier:join; 191, attribute; 192, identifier:entry; 193, identifier:datetime; 194, identifier:datetime; 195, list_splat; 196, identifier:os; 197, identifier:path; 198, identifier:location; 199, identifier:directory; 200, generator_expression; 201, call; 202, for_in_clause; 203, identifier:int; 204, argument_list; 205, identifier:group; 206, call; 207, identifier:group; 208, integer:10; 209, attribute; 210, argument_list; 211, identifier:match; 212, identifier:groups; 213, string; 214, string_content:0 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 7, 15; 8, 16; 9, 17; 10, 18; 11, 19; 12, 20; 12, 21; 12, 22; 13, 23; 13, 24; 14, 25; 16, 26; 16, 27; 17, 28; 17, 29; 18, 30; 18, 31; 19, 32; 19, 33; 21, 34; 21, 35; 22, 36; 22, 37; 24, 38; 25, 39; 25, 40; 29, 41; 29, 42; 30, 43; 30, 44; 31, 45; 31, 46; 32, 47; 32, 48; 35, 49; 36, 50; 37, 51; 37, 52; 37, 53; 38, 54; 40, 55; 42, 56; 49, 57; 49, 58; 50, 59; 50, 60; 52, 61; 53, 62; 54, 63; 54, 64; 57, 65; 57, 66; 58, 67; 60, 68; 60, 69; 61, 70; 61, 71; 61, 72; 61, 73; 62, 74; 63, 75; 63, 76; 64, 77; 64, 78; 64, 79; 65, 80; 65, 81; 67, 82; 67, 83; 68, 84; 68, 85; 69, 86; 70, 87; 70, 88; 71, 89; 72, 90; 72, 91; 73, 92; 74, 93; 78, 94; 78, 95; 87, 96; 87, 97; 88, 98; 88, 99; 89, 100; 90, 101; 90, 102; 91, 103; 92, 104; 93, 105; 93, 106; 95, 107; 99, 108; 99, 109; 100, 110; 100, 111; 101, 112; 101, 113; 102, 114; 103, 115; 104, 116; 104, 117; 105, 118; 105, 119; 106, 120; 106, 121; 108, 122; 108, 123; 109, 124; 109, 125; 110, 126; 110, 127; 111, 128; 111, 129; 114, 130; 114, 131; 115, 132; 115, 133; 116, 134; 117, 135; 117, 136; 122, 137; 122, 138; 123, 139; 123, 140; 125, 141; 125, 142; 131, 143; 131, 144; 132, 145; 132, 146; 133, 147; 133, 148; 134, 149; 135, 150; 135, 151; 136, 152; 143, 153; 143, 154; 144, 155; 144, 156; 149, 157; 149, 158; 151, 159; 152, 160; 153, 161; 153, 162; 154, 163; 154, 164; 156, 165; 156, 166; 157, 167; 157, 168; 158, 169; 160, 170; 160, 171; 169, 172; 169, 173; 170, 174; 170, 175; 171, 176; 171, 177; 171, 178; 173, 179; 173, 180; 179, 181; 179, 182; 180, 183; 180, 184; 182, 185; 182, 186; 184, 187; 184, 188; 185, 189; 185, 190; 186, 191; 186, 192; 187, 193; 187, 194; 188, 195; 189, 196; 189, 197; 191, 198; 191, 199; 195, 200; 200, 201; 200, 202; 201, 203; 201, 204; 202, 205; 202, 206; 204, 207; 204, 208; 206, 209; 206, 210; 209, 211; 209, 212; 210, 213; 213, 214 | def collect_backups(self, location):
"""
Collect the backups at the given location.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: A sorted :class:`list` of :class:`Backup` objects (the
backups are sorted by their date).
:raises: :exc:`~exceptions.ValueError` when the given directory doesn't
exist or isn't readable.
"""
backups = []
location = coerce_location(location)
logger.info("Scanning %s for backups ..", location)
location.ensure_readable()
for entry in natsort(location.context.list_entries(location.directory)):
match = TIMESTAMP_PATTERN.search(entry)
if match:
if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
logger.verbose("Excluded %s (it matched the exclude list).", entry)
elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
logger.verbose("Excluded %s (it didn't match the include list).", entry)
else:
try:
backups.append(Backup(
pathname=os.path.join(location.directory, entry),
timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
))
except ValueError as e:
logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
else:
logger.debug("Failed to match time stamp in filename: %s", entry)
if backups:
logger.info("Found %i timestamped backups in %s.", len(backups), location)
return sorted(backups) |
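A hedged sketch of driving the collector above, assuming the rotate-backups style RotateBackups class and that its constructor accepts rotation_scheme, include_list, and exclude_list keyword arguments; the directory and patterns are placeholders.

from rotate_backups import RotateBackups

collector = RotateBackups(rotation_scheme={'daily': 7},
                          include_list=['*.tar.gz'],
                          exclude_list=['*-partial*'])
for backup in collector.collect_backups('/mnt/backups'):
    # Backups come back sorted by timestamp, as the docstring promises.
    print(backup.timestamp, backup.pathname)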
0, module; 1, function_definition; 2, function_name:prepare_bam; 3, parameters; 4, block; 5, identifier:job; 6, identifier:uuid; 7, identifier:url; 8, identifier:config; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, comment:# 0: Align FASTQ or realign BAM; 13, if_statement; 14, comment:# 1: Sort BAM file if necessary; 15, comment:# Realigning BAM file shuffles read order; 16, if_statement; 17, comment:# 2: Index BAM; 18, comment:# The samtools index disk requirement depends on the input bam and the output bam index; 19, expression_statement; 20, expression_statement; 21, expression_statement; 22, expression_statement; 23, if_statement; 24, return_statement; 25, identifier:paired_url; 26, None; 27, identifier:rg_line; 28, None; 29, comment:"""
Prepares BAM file for Toil germline pipeline.
Steps in pipeline
0: Download and align BAM or FASTQ sample
1: Sort BAM
2: Index BAM
3: Run GATK preprocessing pipeline (Optional)
- Uploads preprocessed BAM to output directory
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique identifier for the sample
:param str url: URL or local path to BAM file or FASTQs
:param Namespace config: Configuration options for pipeline
Requires the following config attributes:
config.genome_fasta FileStoreID for reference genome fasta file
config.genome_fai FileStoreID for reference genome fasta index file
config.genome_dict FileStoreID for reference genome sequence dictionary file
config.g1k_indel FileStoreID for 1000G INDEL resource file
config.mills FileStoreID for Mills resource file
config.dbsnp FileStoreID for dbSNP resource file
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.cores Number of cores for each job
config.xmx Java heap size in bytes
:param str|None paired_url: URL or local path to paired FASTQ file, default is None
:param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None
:return: BAM and BAI FileStoreIDs
:rtype: tuple
"""; 30, attribute; 31, block; 32, comment:# 0: Download BAM; 33, elif_clause; 34, else_clause; 35, boolean_operator; 36, block; 37, else_clause; 38, assignment; 39, assignment; 40, call; 41, call; 42, attribute; 43, block; 44, else_clause; 45, expression_list; 46, identifier:config; 47, identifier:run_bwa; 48, expression_statement; 49, comparison_operator:'.bam' in url.lower(); 50, block; 51, block; 52, attribute; 53, not_operator; 54, expression_statement; 55, comment:# The samtools sort disk requirement depends on the input bam, the tmp files, and the; 56, comment:# sorted output bam.; 57, block; 58, identifier:index_bam_disk; 59, call; 60, identifier:index_bam; 61, call; 62, attribute; 63, argument_list; 64, attribute; 65, argument_list; 66, identifier:config; 67, identifier:preprocess; 68, expression_statement; 69, expression_statement; 70, expression_statement; 71, comment:# Update output BAM promises; 72, expression_statement; 73, expression_statement; 74, comment:# Save processed BAM; 75, expression_statement; 76, expression_statement; 77, expression_statement; 78, expression_statement; 79, block; 80, identifier:output_bam_promise; 81, identifier:output_bai_promise; 82, assignment; 83, string; 84, call; 85, expression_statement; 86, expression_statement; 87, raise_statement; 88, identifier:config; 89, identifier:sorted; 90, attribute; 91, assignment; 92, expression_statement; 93, expression_statement; 94, identifier:PromisedRequirement; 95, argument_list; 96, attribute; 97, argument_list; 98, identifier:job; 99, identifier:addChild; 100, identifier:get_bam; 101, identifier:sorted_bam; 102, identifier:addChild; 103, identifier:index_bam; 104, assignment; 105, call; 106, call; 107, assignment; 108, assignment; 109, assignment; 110, assignment; 111, assignment; 112, call; 113, expression_statement; 114, expression_statement; 115, identifier:get_bam; 116, call; 117, string_content:.bam; 118, attribute; 119, argument_list; 120, call; 121, assignment; 122, call; 123, identifier:config; 124, identifier:run_bwa; 125, identifier:sorted_bam; 126, identifier:get_bam; 127, assignment; 128, assignment; 129, lambda; 130, call; 131, identifier:job; 132, identifier:wrapJobFn; 133, identifier:run_samtools_index; 134, call; 135, keyword_argument; 136, identifier:preprocess; 137, call; 138, attribute; 139, argument_list; 140, attribute; 141, argument_list; 142, identifier:output_bam_promise; 143, call; 144, identifier:output_bai_promise; 145, call; 146, identifier:output_dir; 147, call; 148, identifier:filename; 149, call; 150, identifier:output_bam; 151, call; 152, attribute; 153, argument_list; 154, assignment; 155, assignment; 156, attribute; 157, argument_list; 158, identifier:url; 159, identifier:lower; 160, attribute; 161, argument_list; 162, identifier:get_bam; 163, call; 164, identifier:ValueError; 165, argument_list; 166, identifier:sorted_bam_disk; 167, call; 168, identifier:sorted_bam; 169, call; 170, lambda_parameters; 171, attribute; 172, attribute; 173, argument_list; 174, attribute; 175, argument_list; 176, identifier:disk; 177, identifier:index_bam_disk; 178, attribute; 179, argument_list; 180, identifier:sorted_bam; 181, identifier:addChild; 182, identifier:preprocess; 183, identifier:index_bam; 184, identifier:addChild; 185, identifier:preprocess; 186, attribute; 187, argument_list; 188, attribute; 189, argument_list; 190, attribute; 191, argument_list; 192, attribute; 193, argument_list; 194, attribute; 195, argument_list; 196, identifier:preprocess; 197, identifier:addChild; 198, 
identifier:output_bam; 199, identifier:output_bam_promise; 200, call; 201, identifier:output_bai_promise; 202, call; 203, call; 204, identifier:encapsulate; 205, attribute; 206, identifier:logToMaster; 207, binary_operator:"Downloading BAM: %s" % uuid; 208, attribute; 209, argument_list; 210, binary_operator:'Could not generate BAM file for %s\n'
'Provide a FASTQ URL and set run-bwa or '
'provide a BAM URL that includes .bam extension.' % uuid; 211, identifier:PromisedRequirement; 212, argument_list; 213, attribute; 214, argument_list; 215, identifier:bam; 216, identifier:bam; 217, identifier:size; 218, identifier:sorted_bam; 219, identifier:rv; 220, identifier:sorted_bam; 221, identifier:rv; 222, call; 223, identifier:encapsulate; 224, identifier:preprocess; 225, identifier:rv; 226, integer:0; 227, identifier:preprocess; 228, identifier:rv; 229, integer:1; 230, attribute; 231, identifier:join; 232, attribute; 233, identifier:uuid; 234, string; 235, identifier:format; 236, identifier:uuid; 237, attribute; 238, identifier:job; 239, identifier:wrapJobFn; 240, identifier:output_file_job; 241, identifier:filename; 242, call; 243, identifier:output_dir; 244, keyword_argument; 245, attribute; 246, argument_list; 247, attribute; 248, argument_list; 249, attribute; 250, argument_list; 251, identifier:job; 252, identifier:fileStore; 253, string:"Downloading BAM: %s"; 254, identifier:uuid; 255, call; 256, identifier:encapsulate; 257, concatenated_string; 258, identifier:uuid; 259, lambda; 260, call; 261, identifier:get_bam; 262, identifier:addChildJobFn; 263, identifier:run_samtools_sort; 264, call; 265, keyword_argument; 266, keyword_argument; 267, attribute; 268, argument_list; 269, identifier:os; 270, identifier:path; 271, identifier:config; 272, identifier:output_dir; 273, string_content:{}.preprocessed{}.bam; 274, identifier:config; 275, identifier:suffix; 276, attribute; 277, argument_list; 278, identifier:s3_key_path; 279, attribute; 280, identifier:sorted_bam; 281, identifier:rv; 282, identifier:index_bam; 283, identifier:rv; 284, identifier:job; 285, identifier:wrapJobFn; 286, identifier:setup_and_run_bwakit; 287, identifier:uuid; 288, identifier:url; 289, identifier:rg_line; 290, identifier:config; 291, keyword_argument; 292, attribute; 293, argument_list; 294, string; 295, string; 296, string; 297, lambda_parameters; 298, binary_operator:3 * bam.size; 299, attribute; 300, argument_list; 301, attribute; 302, argument_list; 303, identifier:cores; 304, attribute; 305, identifier:disk; 306, identifier:sorted_bam_disk; 307, identifier:job; 308, identifier:wrapJobFn; 309, identifier:run_gatk_preprocessing; 310, call; 311, call; 312, attribute; 313, attribute; 314, attribute; 315, attribute; 316, attribute; 317, attribute; 318, keyword_argument; 319, keyword_argument; 320, identifier:preprocess; 321, identifier:rv; 322, integer:0; 323, identifier:config; 324, identifier:ssec; 325, identifier:paired_url; 326, identifier:paired_url; 327, identifier:job; 328, identifier:wrapJobFn; 329, identifier:download_url_job; 330, identifier:url; 331, keyword_argument; 332, keyword_argument; 333, keyword_argument; 334, string_content; 335, string_content:Provide a FASTQ URL and set run-bwa or; 336, string_content:provide a BAM URL that includes .bam extension.; 337, identifier:bam; 338, integer:3; 339, attribute; 340, identifier:get_bam; 341, identifier:rv; 342, identifier:get_bam; 343, identifier:rv; 344, identifier:config; 345, identifier:cores; 346, attribute; 347, argument_list; 348, attribute; 349, argument_list; 350, identifier:config; 351, identifier:genome_fasta; 352, identifier:config; 353, identifier:genome_dict; 354, identifier:config; 355, identifier:genome_fai; 356, identifier:config; 357, identifier:g1k_indel; 358, identifier:config; 359, identifier:mills; 360, identifier:config; 361, identifier:dbsnp; 362, identifier:memory; 363, attribute; 364, identifier:cores; 365, attribute; 
366, identifier:name; 367, string; 368, identifier:s3_key_path; 369, attribute; 370, identifier:disk; 371, attribute; 372, escape_sequence:\n; 373, identifier:bam; 374, identifier:size; 375, identifier:sorted_bam; 376, identifier:rv; 377, identifier:index_bam; 378, identifier:rv; 379, identifier:config; 380, identifier:xmx; 381, identifier:config; 382, identifier:cores; 383, string_content:toil.bam; 384, identifier:config; 385, identifier:ssec; 386, identifier:config; 387, identifier:file_size | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 9, 25; 9, 26; 10, 27; 10, 28; 11, 29; 13, 30; 13, 31; 13, 32; 13, 33; 13, 34; 16, 35; 16, 36; 16, 37; 19, 38; 20, 39; 21, 40; 22, 41; 23, 42; 23, 43; 23, 44; 24, 45; 30, 46; 30, 47; 31, 48; 33, 49; 33, 50; 34, 51; 35, 52; 35, 53; 36, 54; 37, 55; 37, 56; 37, 57; 38, 58; 38, 59; 39, 60; 39, 61; 40, 62; 40, 63; 41, 64; 41, 65; 42, 66; 42, 67; 43, 68; 43, 69; 43, 70; 43, 71; 43, 72; 43, 73; 43, 74; 43, 75; 43, 76; 43, 77; 43, 78; 44, 79; 45, 80; 45, 81; 48, 82; 49, 83; 49, 84; 50, 85; 50, 86; 51, 87; 52, 88; 52, 89; 53, 90; 54, 91; 57, 92; 57, 93; 59, 94; 59, 95; 61, 96; 61, 97; 62, 98; 62, 99; 63, 100; 64, 101; 64, 102; 65, 103; 68, 104; 69, 105; 70, 106; 72, 107; 73, 108; 75, 109; 76, 110; 77, 111; 78, 112; 79, 113; 79, 114; 82, 115; 82, 116; 83, 117; 84, 118; 84, 119; 85, 120; 86, 121; 87, 122; 90, 123; 90, 124; 91, 125; 91, 126; 92, 127; 93, 128; 95, 129; 95, 130; 96, 131; 96, 132; 97, 133; 97, 134; 97, 135; 104, 136; 104, 137; 105, 138; 105, 139; 106, 140; 106, 141; 107, 142; 107, 143; 108, 144; 108, 145; 109, 146; 109, 147; 110, 148; 110, 149; 111, 150; 111, 151; 112, 152; 112, 153; 113, 154; 114, 155; 116, 156; 116, 157; 118, 158; 118, 159; 120, 160; 120, 161; 121, 162; 121, 163; 122, 164; 122, 165; 127, 166; 127, 167; 128, 168; 128, 169; 129, 170; 129, 171; 130, 172; 130, 173; 134, 174; 134, 175; 135, 176; 135, 177; 137, 178; 137, 179; 138, 180; 138, 181; 139, 182; 140, 183; 140, 184; 141, 185; 143, 186; 143, 187; 145, 188; 145, 189; 147, 190; 147, 191; 149, 192; 149, 193; 151, 194; 151, 195; 152, 196; 152, 197; 153, 198; 154, 199; 154, 200; 155, 201; 155, 202; 156, 203; 156, 204; 160, 205; 160, 206; 161, 207; 163, 208; 163, 209; 165, 210; 167, 211; 167, 212; 169, 213; 169, 214; 170, 215; 171, 216; 171, 217; 172, 218; 172, 219; 174, 220; 174, 221; 178, 222; 178, 223; 186, 224; 186, 225; 187, 226; 188, 227; 188, 228; 189, 229; 190, 230; 190, 231; 191, 232; 191, 233; 192, 234; 192, 235; 193, 236; 193, 237; 194, 238; 194, 239; 195, 240; 195, 241; 195, 242; 195, 243; 195, 244; 200, 245; 200, 246; 202, 247; 202, 248; 203, 249; 203, 250; 205, 251; 205, 252; 207, 253; 207, 254; 208, 255; 208, 256; 210, 257; 210, 258; 212, 259; 212, 260; 213, 261; 213, 262; 214, 263; 214, 264; 214, 265; 214, 266; 222, 267; 222, 268; 230, 269; 230, 270; 232, 271; 232, 272; 234, 273; 237, 274; 237, 275; 242, 276; 242, 277; 244, 278; 244, 279; 245, 280; 245, 281; 247, 282; 247, 283; 249, 284; 249, 285; 250, 286; 250, 287; 250, 288; 250, 289; 250, 290; 250, 291; 255, 292; 255, 293; 257, 294; 257, 295; 257, 296; 259, 297; 259, 298; 260, 299; 260, 300; 264, 301; 264, 302; 265, 303; 265, 304; 266, 305; 266, 306; 267, 307; 267, 308; 268, 309; 268, 310; 268, 311; 268, 312; 268, 313; 268, 314; 268, 315; 268, 316; 268, 317; 268, 318; 268, 319; 276, 320; 276, 321; 277, 322; 279, 323; 279, 324; 291, 325; 291, 326; 292, 327; 292, 328; 293, 329; 293, 330; 293, 331; 293, 332; 
293, 333; 294, 334; 295, 335; 296, 336; 297, 337; 298, 338; 298, 339; 299, 340; 299, 341; 301, 342; 301, 343; 304, 344; 304, 345; 310, 346; 310, 347; 311, 348; 311, 349; 312, 350; 312, 351; 313, 352; 313, 353; 314, 354; 314, 355; 315, 356; 315, 357; 316, 358; 316, 359; 317, 360; 317, 361; 318, 362; 318, 363; 319, 364; 319, 365; 331, 366; 331, 367; 332, 368; 332, 369; 333, 370; 333, 371; 334, 372; 339, 373; 339, 374; 346, 375; 346, 376; 348, 377; 348, 378; 363, 379; 363, 380; 365, 381; 365, 382; 367, 383; 369, 384; 369, 385; 371, 386; 371, 387 | def prepare_bam(job, uuid, url, config, paired_url=None, rg_line=None):
"""
Prepares BAM file for Toil germline pipeline.
Steps in pipeline
0: Download and align BAM or FASTQ sample
1: Sort BAM
2: Index BAM
3: Run GATK preprocessing pipeline (Optional)
- Uploads preprocessed BAM to output directory
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique identifier for the sample
:param str url: URL or local path to BAM file or FASTQs
:param Namespace config: Configuration options for pipeline
Requires the following config attributes:
config.genome_fasta FileStoreID for reference genome fasta file
config.genome_fai FileStoreID for reference genome fasta index file
config.genome_dict FileStoreID for reference genome sequence dictionary file
config.g1k_indel FileStoreID for 1000G INDEL resource file
config.mills FileStoreID for Mills resource file
config.dbsnp FileStoreID for dbSNP resource file
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.cores Number of cores for each job
config.xmx Java heap size in bytes
:param str|None paired_url: URL or local path to paired FASTQ file, default is None
:param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None
:return: BAM and BAI FileStoreIDs
:rtype: tuple
"""
# 0: Align FASTQ or realign BAM
if config.run_bwa:
get_bam = job.wrapJobFn(setup_and_run_bwakit,
uuid,
url,
rg_line,
config,
paired_url=paired_url).encapsulate()
# 0: Download BAM
elif '.bam' in url.lower():
job.fileStore.logToMaster("Downloading BAM: %s" % uuid)
get_bam = job.wrapJobFn(download_url_job,
url,
name='toil.bam',
s3_key_path=config.ssec,
disk=config.file_size).encapsulate()
else:
raise ValueError('Could not generate BAM file for %s\n'
'Provide a FASTQ URL and set run-bwa or '
'provide a BAM URL that includes .bam extension.' % uuid)
# 1: Sort BAM file if necessary
# Realigning BAM file shuffles read order
if config.sorted and not config.run_bwa:
sorted_bam = get_bam
else:
# The samtools sort disk requirement depends on the input bam, the tmp files, and the
# sorted output bam.
sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size, get_bam.rv())
sorted_bam = get_bam.addChildJobFn(run_samtools_sort,
get_bam.rv(),
cores=config.cores,
disk=sorted_bam_disk)
# 2: Index BAM
# The samtools index disk requirement depends on the input bam and the output bam index
index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv())
index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk=index_bam_disk)
job.addChild(get_bam)
sorted_bam.addChild(index_bam)
if config.preprocess:
preprocess = job.wrapJobFn(run_gatk_preprocessing,
sorted_bam.rv(),
index_bam.rv(),
config.genome_fasta,
config.genome_dict,
config.genome_fai,
config.g1k_indel,
config.mills,
config.dbsnp,
memory=config.xmx,
cores=config.cores).encapsulate()
sorted_bam.addChild(preprocess)
index_bam.addChild(preprocess)
# Update output BAM promises
output_bam_promise = preprocess.rv(0)
output_bai_promise = preprocess.rv(1)
# Save processed BAM
output_dir = os.path.join(config.output_dir, uuid)
filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix)
output_bam = job.wrapJobFn(output_file_job,
filename,
preprocess.rv(0),
output_dir,
s3_key_path=config.ssec)
preprocess.addChild(output_bam)
else:
output_bam_promise = sorted_bam.rv()
output_bai_promise = index_bam.rv()
return output_bam_promise, output_bai_promise |
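One plausible way to wire prepare_bam into a Toil workflow, shown only as a sketch: the argparse.Namespace below is a stand-in for the pipeline's real config parsing, and the sample UUID, BAM path, and resource sizes are placeholders.

import argparse
from toil.job import Job

# Placeholder config covering the download/sort path (run_bwa and preprocess off).
config = argparse.Namespace(run_bwa=False, sorted=True, preprocess=False,
                            ssec=None, file_size='10G', cores=4, xmx='8G',
                            suffix='', output_dir='/tmp/out')

options = Job.Runner.getDefaultOptions('./jobstore')
root = Job.wrapJobFn(prepare_bam, 'sample-1', '/data/sample-1.bam', config)
bam_id, bai_id = Job.Runner.startToil(root, options)   # resolves the promised BAM/BAI IDs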
0, module; 1, function_definition; 2, function_name:alignment; 3, parameters; 4, block; 5, identifier:job; 6, identifier:ids; 7, identifier:input_args; 8, identifier:sample; 9, expression_statement; 10, expression_statement; 11, comment:# ids['bam'] = job.fileStore.getEmptyFileStoreID(); 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, comment:# I/O; 17, expression_statement; 18, comment:# Get fastqs associated with this sample; 19, for_statement; 20, comment:# Parameters for BWA and Bamsort; 21, expression_statement; 22, expression_statement; 23, expression_statement; 24, comment:# Piping the output to a file handle; 25, with_statement; 26, with_statement; 27, comment:# Save in JobStore; 28, comment:# job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam')); 29, expression_statement; 30, comment:# Copy file to S3; 31, if_statement; 32, comment:# Move file in output_dir; 33, if_statement; 34, comment:"""
Runs BWA and then Bamsort on the supplied fastqs for this sample
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
"""; 35, assignment; 36, assignment; 37, assignment; 38, assignment; 39, assignment; 40, call; 41, identifier:url; 42, identifier:urls; 43, block; 44, assignment; 45, assignment; 46, assignment; 47, with_clause; 48, block; 49, with_clause; 50, block; 51, assignment; 52, subscript; 53, block; 54, subscript; 55, block; 56, pattern_list; 57, identifier:sample; 58, identifier:work_dir; 59, call; 60, identifier:output_dir; 61, subscript; 62, identifier:key_path; 63, subscript; 64, identifier:cores; 65, call; 66, identifier:return_input_paths; 67, argument_list; 68, expression_statement; 69, identifier:docker_cmd; 70, list; 71, identifier:bwa_command; 72, binary_operator:["jvivian/bwa",
"mem",
"-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
"-T", str(0),
"-t", str(cores),
"/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]; 73, identifier:bamsort_command; 74, list; 75, with_item; 76, expression_statement; 77, with_item; 78, expression_statement; 79, subscript; 80, call; 81, identifier:input_args; 82, string; 83, expression_statement; 84, identifier:input_args; 85, string; 86, expression_statement; 87, identifier:uuid; 88, identifier:urls; 89, attribute; 90, argument_list; 91, identifier:input_args; 92, string; 93, identifier:input_args; 94, string; 95, attribute; 96, argument_list; 97, identifier:job; 98, identifier:work_dir; 99, identifier:ids; 100, string; 101, string; 102, string; 103, string; 104, string; 105, string; 106, string; 107, call; 108, string; 109, string; 110, string; 111, string; 112, call; 113, list; 114, list_comprehension; 115, string:"jeltje/biobambam"; 116, string:"/usr/local/bin/bamsort"; 117, string:"inputformat=sam"; 118, string:"level=1"; 119, call; 120, call; 121, string:"calmdnm=1"; 122, string:"calmdnmrecompindetonly=1"; 123, string:"calmdnmreference=/data/ref.fa"; 124, call; 125, as_pattern; 126, call; 127, as_pattern; 128, call; 129, identifier:ids; 130, string; 131, attribute; 132, argument_list; 133, string_content:s3_dir; 134, call; 135, string_content:output_dir; 136, call; 137, attribute; 138, identifier:getLocalTempDir; 139, string_content:output_dir; 140, string_content:ssec; 141, identifier:multiprocessing; 142, identifier:cpu_count; 143, string_content:ref.fa; 144, string_content:ref.fa.amb; 145, string_content:ref.fa.ann; 146, string_content:ref.fa.bwt; 147, string_content:ref.fa.pac; 148, string_content:ref.fa.sa; 149, string_content:ref.fa.fai; 150, identifier:download_encrypted_file; 151, argument_list; 152, string_content:docker; 153, string_content:run; 154, string_content:--rm; 155, string_content:-v; 156, attribute; 157, argument_list; 158, string:"jvivian/bwa"; 159, string:"mem"; 160, string:"-R"; 161, call; 162, string:"-T"; 163, call; 164, string:"-t"; 165, call; 166, string:"/data/ref.fa"; 167, call; 168, for_in_clause; 169, attribute; 170, argument_list; 171, attribute; 172, argument_list; 173, attribute; 174, argument_list; 175, call; 176, as_pattern_target; 177, attribute; 178, argument_list; 179, call; 180, as_pattern_target; 181, attribute; 182, argument_list; 183, string_content:bam; 184, attribute; 185, identifier:writeGlobalFile; 186, call; 187, attribute; 188, argument_list; 189, identifier:move_to_output_dir; 190, argument_list; 191, identifier:job; 192, identifier:fileStore; 193, identifier:work_dir; 194, identifier:url; 195, identifier:key_path; 196, call; 197, string; 198, identifier:format; 199, identifier:work_dir; 200, attribute; 201, argument_list; 202, identifier:str; 203, argument_list; 204, identifier:str; 205, argument_list; 206, attribute; 207, argument_list; 208, identifier:x; 209, identifier:urls; 210, string:"inputthreads={}"; 211, identifier:format; 212, identifier:cores; 213, string:"outputthreads={}"; 214, identifier:format; 215, identifier:cores; 216, string:"I=/data/{}"; 217, identifier:format; 218, binary_operator:uuid + '.sam'; 219, identifier:open; 220, argument_list; 221, identifier:f_out; 222, identifier:subprocess; 223, identifier:check_call; 224, binary_operator:docker_cmd + bwa_command; 225, keyword_argument; 226, identifier:open; 227, argument_list; 228, identifier:f_out; 229, identifier:subprocess; 230, identifier:check_call; 231, binary_operator:docker_cmd + bamsort_command; 232, keyword_argument; 233, identifier:job; 234, 
identifier:fileStore; 235, attribute; 236, argument_list; 237, identifier:job; 238, identifier:addChildJobFn; 239, identifier:upload_bam_to_s3; 240, identifier:ids; 241, identifier:input_args; 242, identifier:sample; 243, keyword_argument; 244, keyword_argument; 245, keyword_argument; 246, identifier:work_dir; 247, identifier:output_dir; 248, keyword_argument; 249, keyword_argument; 250, attribute; 251, argument_list; 252, string_content:{}:/data; 253, string:"@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper"; 254, identifier:format; 255, identifier:uuid; 256, integer:0; 257, identifier:cores; 258, attribute; 259, identifier:join; 260, string; 261, call; 262, identifier:uuid; 263, string; 264, call; 265, string; 266, identifier:docker_cmd; 267, identifier:bwa_command; 268, identifier:stdout; 269, identifier:f_out; 270, call; 271, string; 272, identifier:docker_cmd; 273, identifier:bamsort_command; 274, identifier:stdout; 275, identifier:f_out; 276, attribute; 277, identifier:join; 278, identifier:work_dir; 279, binary_operator:uuid + '.bam'; 280, identifier:cores; 281, integer:32; 282, identifier:memory; 283, string; 284, identifier:disk; 285, string; 286, identifier:uuid; 287, None; 288, identifier:files; 289, list; 290, attribute; 291, identifier:basename; 292, identifier:url; 293, identifier:os; 294, identifier:path; 295, string_content:/data/; 296, attribute; 297, argument_list; 298, string_content:.sam; 299, attribute; 300, argument_list; 301, string_content:w; 302, attribute; 303, argument_list; 304, string_content:w; 305, identifier:os; 306, identifier:path; 307, identifier:uuid; 308, string; 309, string_content:20 G; 310, string_content:30 G; 311, binary_operator:uuid + '.bam'; 312, identifier:os; 313, identifier:path; 314, attribute; 315, identifier:basename; 316, identifier:x; 317, attribute; 318, identifier:join; 319, identifier:work_dir; 320, binary_operator:uuid + '.sam'; 321, attribute; 322, identifier:join; 323, identifier:work_dir; 324, binary_operator:uuid + '.bam'; 325, string_content:.bam; 326, identifier:uuid; 327, string; 328, identifier:os; 329, identifier:path; 330, identifier:os; 331, identifier:path; 332, identifier:uuid; 333, string; 334, identifier:os; 335, identifier:path; 336, identifier:uuid; 337, string; 338, string_content:.bam; 339, string_content:.sam; 340, string_content:.bam | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 4, 30; 4, 31; 4, 32; 4, 33; 9, 34; 10, 35; 12, 36; 13, 37; 14, 38; 15, 39; 17, 40; 19, 41; 19, 42; 19, 43; 21, 44; 22, 45; 23, 46; 25, 47; 25, 48; 26, 49; 26, 50; 29, 51; 31, 52; 31, 53; 33, 54; 33, 55; 35, 56; 35, 57; 36, 58; 36, 59; 37, 60; 37, 61; 38, 62; 38, 63; 39, 64; 39, 65; 40, 66; 40, 67; 43, 68; 44, 69; 44, 70; 45, 71; 45, 72; 46, 73; 46, 74; 47, 75; 48, 76; 49, 77; 50, 78; 51, 79; 51, 80; 52, 81; 52, 82; 53, 83; 54, 84; 54, 85; 55, 86; 56, 87; 56, 88; 59, 89; 59, 90; 61, 91; 61, 92; 63, 93; 63, 94; 65, 95; 65, 96; 67, 97; 67, 98; 67, 99; 67, 100; 67, 101; 67, 102; 67, 103; 67, 104; 67, 105; 67, 106; 68, 107; 70, 108; 70, 109; 70, 110; 70, 111; 70, 112; 72, 113; 72, 114; 74, 115; 74, 116; 74, 117; 74, 118; 74, 119; 74, 120; 74, 121; 74, 122; 74, 123; 74, 124; 75, 125; 76, 126; 77, 127; 78, 128; 79, 129; 79, 130; 80, 131; 80, 132; 82, 133; 83, 134; 85, 135; 86, 136; 89, 137; 89, 138; 92, 139; 94, 140; 95, 141; 95, 142; 100, 143; 101, 144; 102, 145; 103, 146; 104, 147; 105, 148; 106, 
149; 107, 150; 107, 151; 108, 152; 109, 153; 110, 154; 111, 155; 112, 156; 112, 157; 113, 158; 113, 159; 113, 160; 113, 161; 113, 162; 113, 163; 113, 164; 113, 165; 113, 166; 114, 167; 114, 168; 119, 169; 119, 170; 120, 171; 120, 172; 124, 173; 124, 174; 125, 175; 125, 176; 126, 177; 126, 178; 127, 179; 127, 180; 128, 181; 128, 182; 130, 183; 131, 184; 131, 185; 132, 186; 134, 187; 134, 188; 136, 189; 136, 190; 137, 191; 137, 192; 151, 193; 151, 194; 151, 195; 151, 196; 156, 197; 156, 198; 157, 199; 161, 200; 161, 201; 163, 202; 163, 203; 165, 204; 165, 205; 167, 206; 167, 207; 168, 208; 168, 209; 169, 210; 169, 211; 170, 212; 171, 213; 171, 214; 172, 215; 173, 216; 173, 217; 174, 218; 175, 219; 175, 220; 176, 221; 177, 222; 177, 223; 178, 224; 178, 225; 179, 226; 179, 227; 180, 228; 181, 229; 181, 230; 182, 231; 182, 232; 184, 233; 184, 234; 186, 235; 186, 236; 187, 237; 187, 238; 188, 239; 188, 240; 188, 241; 188, 242; 188, 243; 188, 244; 188, 245; 190, 246; 190, 247; 190, 248; 190, 249; 196, 250; 196, 251; 197, 252; 200, 253; 200, 254; 201, 255; 203, 256; 205, 257; 206, 258; 206, 259; 207, 260; 207, 261; 218, 262; 218, 263; 220, 264; 220, 265; 224, 266; 224, 267; 225, 268; 225, 269; 227, 270; 227, 271; 231, 272; 231, 273; 232, 274; 232, 275; 235, 276; 235, 277; 236, 278; 236, 279; 243, 280; 243, 281; 244, 282; 244, 283; 245, 284; 245, 285; 248, 286; 248, 287; 249, 288; 249, 289; 250, 290; 250, 291; 251, 292; 258, 293; 258, 294; 260, 295; 261, 296; 261, 297; 263, 298; 264, 299; 264, 300; 265, 301; 270, 302; 270, 303; 271, 304; 276, 305; 276, 306; 279, 307; 279, 308; 283, 309; 285, 310; 289, 311; 290, 312; 290, 313; 296, 314; 296, 315; 297, 316; 299, 317; 299, 318; 300, 319; 300, 320; 302, 321; 302, 322; 303, 323; 303, 324; 308, 325; 311, 326; 311, 327; 314, 328; 314, 329; 317, 330; 317, 331; 320, 332; 320, 333; 321, 334; 321, 335; 324, 336; 324, 337; 327, 338; 333, 339; 337, 340 | def alignment(job, ids, input_args, sample):
"""
Runs BWA and then Bamsort on the supplied fastqs for this sample
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
"""
uuid, urls = sample
# ids['bam'] = job.fileStore.getEmptyFileStoreID()
work_dir = job.fileStore.getLocalTempDir()
output_dir = input_args['output_dir']
key_path = input_args['ssec']
cores = multiprocessing.cpu_count()
# I/O
return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
# Get fastqs associated with this sample
for url in urls:
download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
# Parameters for BWA and Bamsort
docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
bwa_command = ["jvivian/bwa",
"mem",
"-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
"-T", str(0),
"-t", str(cores),
"/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
bamsort_command = ["jeltje/biobambam",
"/usr/local/bin/bamsort",
"inputformat=sam",
"level=1",
"inputthreads={}".format(cores),
"outputthreads={}".format(cores),
"calmdnm=1",
"calmdnmrecompindetonly=1",
"calmdnmreference=/data/ref.fa",
"I=/data/{}".format(uuid + '.sam')]
# Piping the output to a file handle
with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
# Save in JobStore
# job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
# Copy file to S3
if input_args['s3_dir']:
job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
# Move file to output_dir
if input_args['output_dir']:
move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam']) |
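The BWA step above builds its command line from two lists and redirects stdout into a SAM file. A minimal sketch of that composition, using hypothetical sample values (work_dir, uuid, urls and cores are placeholders, not taken from a real run):

import os

# Hypothetical inputs, for illustration only.
work_dir = '/tmp/work'
uuid = 'sample-1'
urls = ['s3://bucket/sample-1_R1.fastq.gz', 's3://bucket/sample-1_R2.fastq.gz']
cores = 4

# Same pattern as alignment(): a shared docker prefix plus a tool-specific suffix.
docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
bwa_command = ["jvivian/bwa", "mem",
               "-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
               "-T", str(0), "-t", str(cores),
               "/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]

# The concatenated list is what subprocess.check_call() receives; in the pipeline its
# stdout is redirected into <uuid>.sam inside work_dir.
print(' '.join(docker_cmd + bwa_command))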
0, module; 1, function_definition; 2, function_name:bamsort_and_index; 3, parameters; 4, block; 5, identifier:job; 6, identifier:job_vars; 7, expression_statement; 8, comment:# Unpack variables; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, comment:# I/O; 13, expression_statement; 14, expression_statement; 15, comment:# Command -- second argument is "Output Prefix"; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, expression_statement; 20, comment:# Write to FileStore; 21, expression_statement; 22, expression_statement; 23, comment:# Run child job; 24, expression_statement; 25, expression_statement; 26, return_statement; 27, comment:"""
Sorts bam file and produces index file
job_vars: tuple Tuple of dictionaries: input_args and ids
"""; 28, assignment; 29, assignment; 30, assignment; 31, assignment; 32, assignment; 33, assignment; 34, assignment; 35, call; 36, call; 37, assignment; 38, assignment; 39, assignment; 40, assignment; 41, expression_list; 42, pattern_list; 43, identifier:job_vars; 44, identifier:work_dir; 45, call; 46, identifier:sudo; 47, subscript; 48, identifier:rg_alignments; 49, call; 50, identifier:output; 51, call; 52, identifier:cmd1; 53, list; 54, identifier:cmd2; 55, list; 56, identifier:docker_call; 57, argument_list; 58, identifier:docker_call; 59, argument_list; 60, subscript; 61, call; 62, subscript; 63, call; 64, identifier:output_ids; 65, call; 66, identifier:rseq_id; 67, call; 68, identifier:rseq_id; 69, identifier:output_ids; 70, identifier:input_args; 71, identifier:ids; 72, attribute; 73, argument_list; 74, identifier:input_args; 75, string; 76, identifier:return_input_paths; 77, argument_list; 78, attribute; 79, argument_list; 80, string; 81, call; 82, call; 83, string; 84, call; 85, keyword_argument; 86, keyword_argument; 87, keyword_argument; 88, keyword_argument; 89, keyword_argument; 90, keyword_argument; 91, keyword_argument; 92, keyword_argument; 93, identifier:ids; 94, string; 95, attribute; 96, argument_list; 97, identifier:ids; 98, string; 99, attribute; 100, argument_list; 101, attribute; 102, argument_list; 103, attribute; 104, argument_list; 105, attribute; 106, identifier:getLocalTempDir; 107, string_content:sudo; 108, identifier:job; 109, identifier:work_dir; 110, identifier:ids; 111, string; 112, attribute; 113, identifier:join; 114, identifier:work_dir; 115, string; 116, string_content:sort; 117, identifier:docker_path; 118, argument_list; 119, identifier:docker_path; 120, argument_list; 121, string_content:index; 122, identifier:docker_path; 123, argument_list; 124, identifier:tool; 125, string; 126, identifier:tool_parameters; 127, identifier:cmd1; 128, identifier:work_dir; 129, identifier:work_dir; 130, identifier:sudo; 131, identifier:sudo; 132, identifier:tool; 133, string; 134, identifier:tool_parameters; 135, identifier:cmd2; 136, identifier:work_dir; 137, identifier:work_dir; 138, identifier:sudo; 139, identifier:sudo; 140, string_content:sorted.bam; 141, attribute; 142, identifier:writeGlobalFile; 143, identifier:output; 144, string_content:sorted.bam.bai; 145, attribute; 146, identifier:writeGlobalFile; 147, call; 148, call; 149, identifier:rv; 150, call; 151, identifier:rv; 152, identifier:job; 153, identifier:fileStore; 154, string_content:rg_alignments.bam; 155, identifier:os; 156, identifier:path; 157, string_content:sorted.bam; 158, identifier:rg_alignments; 159, string; 160, identifier:output; 161, string_content:quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e; 162, string_content:quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e; 163, identifier:job; 164, identifier:fileStore; 165, identifier:job; 166, identifier:fileStore; 167, attribute; 168, argument_list; 169, attribute; 170, argument_list; 171, attribute; 172, argument_list; 173, string_content:sorted; 174, attribute; 175, identifier:join; 176, identifier:work_dir; 177, string; 178, identifier:job; 179, identifier:addChildJobFn; 180, identifier:sort_bam_by_reference; 181, identifier:job_vars; 182, keyword_argument; 183, identifier:job; 184, identifier:addChildJobFn; 185, identifier:rseq_qc; 186, identifier:job_vars; 187, keyword_argument; 188, identifier:os; 189, identifier:path; 190, string_content:sorted.bam.bai; 191, identifier:disk; 192, 
string; 193, identifier:disk; 194, string; 195, string_content:50 G; 196, string_content:20 G | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 7, 27; 9, 28; 10, 29; 11, 30; 13, 31; 14, 32; 16, 33; 17, 34; 18, 35; 19, 36; 21, 37; 22, 38; 24, 39; 25, 40; 26, 41; 28, 42; 28, 43; 29, 44; 29, 45; 30, 46; 30, 47; 31, 48; 31, 49; 32, 50; 32, 51; 33, 52; 33, 53; 34, 54; 34, 55; 35, 56; 35, 57; 36, 58; 36, 59; 37, 60; 37, 61; 38, 62; 38, 63; 39, 64; 39, 65; 40, 66; 40, 67; 41, 68; 41, 69; 42, 70; 42, 71; 45, 72; 45, 73; 47, 74; 47, 75; 49, 76; 49, 77; 51, 78; 51, 79; 53, 80; 53, 81; 53, 82; 55, 83; 55, 84; 57, 85; 57, 86; 57, 87; 57, 88; 59, 89; 59, 90; 59, 91; 59, 92; 60, 93; 60, 94; 61, 95; 61, 96; 62, 97; 62, 98; 63, 99; 63, 100; 65, 101; 65, 102; 67, 103; 67, 104; 72, 105; 72, 106; 75, 107; 77, 108; 77, 109; 77, 110; 77, 111; 78, 112; 78, 113; 79, 114; 79, 115; 80, 116; 81, 117; 81, 118; 82, 119; 82, 120; 83, 121; 84, 122; 84, 123; 85, 124; 85, 125; 86, 126; 86, 127; 87, 128; 87, 129; 88, 130; 88, 131; 89, 132; 89, 133; 90, 134; 90, 135; 91, 136; 91, 137; 92, 138; 92, 139; 94, 140; 95, 141; 95, 142; 96, 143; 98, 144; 99, 145; 99, 146; 100, 147; 101, 148; 101, 149; 103, 150; 103, 151; 105, 152; 105, 153; 111, 154; 112, 155; 112, 156; 115, 157; 118, 158; 120, 159; 123, 160; 125, 161; 133, 162; 141, 163; 141, 164; 145, 165; 145, 166; 147, 167; 147, 168; 148, 169; 148, 170; 150, 171; 150, 172; 159, 173; 167, 174; 167, 175; 168, 176; 168, 177; 169, 178; 169, 179; 170, 180; 170, 181; 170, 182; 171, 183; 171, 184; 172, 185; 172, 186; 172, 187; 174, 188; 174, 189; 177, 190; 182, 191; 182, 192; 187, 193; 187, 194; 192, 195; 194, 196 | def bamsort_and_index(job, job_vars):
"""
Sorts bam file and produces index file
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam')
output = os.path.join(work_dir, 'sorted.bam')
# Command -- second argument is "Output Prefix"
cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')]
cmd2 = ['index', docker_path(output)]
docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=cmd1, work_dir=work_dir, sudo=sudo)
docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=cmd2, work_dir=work_dir, sudo=sudo)
# Write to FileStore
ids['sorted.bam'] = job.fileStore.writeGlobalFile(output)
ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai'))
# Run child job
output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv()
rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv()
return rseq_id, output_ids |
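docker_call() above runs samtools 0.1.19, whose legacy sort interface takes an output prefix rather than a file name. A rough local equivalent of the two commands, assuming samtools is on the PATH and rg_alignments.bam already sits in the working directory (an illustrative sketch, not the pipeline's docker wrapper):

import os
import subprocess

work_dir = '/tmp/work'                                # assumed working directory
in_bam = os.path.join(work_dir, 'rg_alignments.bam')
out_prefix = os.path.join(work_dir, 'sorted')         # legacy samtools sort appends '.bam' itself

# Equivalent of cmd1: coordinate-sort the BAM to <out_prefix>.bam.
subprocess.check_call(['samtools', 'sort', in_bam, out_prefix])
# Equivalent of cmd2: write the BAI index next to the sorted BAM.
subprocess.check_call(['samtools', 'index', out_prefix + '.bam'])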
0, module; 1, function_definition; 2, function_name:sort_bam_by_reference; 3, parameters; 4, block; 5, identifier:job; 6, identifier:job_vars; 7, expression_statement; 8, comment:# Unpack variables; 9, expression_statement; 10, expression_statement; 11, comment:# I/O; 12, expression_statement; 13, expression_statement; 14, comment:# Call: Samtools; 15, expression_statement; 16, expression_statement; 17, for_statement; 18, expression_statement; 19, comment:# Iterate through chromosomes to create mini-bams; 20, for_statement; 21, expression_statement; 22, expression_statement; 23, expression_statement; 24, comment:# Write to FileStore; 25, expression_statement; 26, expression_statement; 27, expression_statement; 28, return_statement; 29, comment:"""
Sorts the bam by reference
job_vars: tuple Tuple of dictionaries: input_args and ids
"""; 30, assignment; 31, assignment; 32, assignment; 33, assignment; 34, assignment; 35, assignment; 36, identifier:line; 37, identifier:handle; 38, block; 39, call; 40, identifier:chrom; 41, identifier:ref_seqs; 42, comment:# job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam); 43, block; 44, assignment; 45, assignment; 46, call; 47, assignment; 48, assignment; 49, assignment; 50, expression_list; 51, pattern_list; 52, identifier:job_vars; 53, identifier:work_dir; 54, call; 55, pattern_list; 56, call; 57, identifier:output; 58, call; 59, identifier:ref_seqs; 60, list; 61, identifier:handle; 62, attribute; 63, if_statement; 64, attribute; 65, argument_list; 66, expression_statement; 67, expression_statement; 68, expression_statement; 69, expression_statement; 70, identifier:sorted_files; 71, list_comprehension; 72, identifier:cmd; 73, binary_operator:["samtools", "cat", "-o", output] + sorted_files; 74, attribute; 75, argument_list; 76, subscript; 77, call; 78, identifier:rsem_id; 79, call; 80, identifier:exon_id; 81, call; 82, identifier:exon_id; 83, identifier:rsem_id; 84, identifier:input_args; 85, identifier:ids; 86, attribute; 87, argument_list; 88, identifier:sorted_bam; 89, identifier:sorted_bai; 90, identifier:return_input_paths; 91, argument_list; 92, attribute; 93, argument_list; 94, call; 95, identifier:stdout; 96, call; 97, block; 98, identifier:handle; 99, identifier:close; 100, assignment; 101, assignment; 102, assignment; 103, call; 104, binary_operator:os.path.join(work_dir, chrom) + '.bam'; 105, for_in_clause; 106, list; 107, identifier:sorted_files; 108, identifier:subprocess; 109, identifier:check_call; 110, identifier:cmd; 111, identifier:ids; 112, string; 113, attribute; 114, argument_list; 115, attribute; 116, argument_list; 117, attribute; 118, argument_list; 119, attribute; 120, identifier:getLocalTempDir; 121, identifier:job; 122, identifier:work_dir; 123, identifier:ids; 124, string; 125, string; 126, attribute; 127, identifier:join; 128, identifier:work_dir; 129, string; 130, attribute; 131, argument_list; 132, attribute; 133, argument_list; 134, expression_statement; 135, expression_statement; 136, expression_statement; 137, identifier:cmd_view; 138, list; 139, identifier:cmd_sort; 140, list; 141, identifier:p1; 142, call; 143, attribute; 144, argument_list; 145, call; 146, string; 147, identifier:chrom; 148, identifier:ref_seqs; 149, string:"samtools"; 150, string:"cat"; 151, string:"-o"; 152, identifier:output; 153, string_content:sort_by_ref.bam; 154, attribute; 155, identifier:writeGlobalFile; 156, identifier:output; 157, call; 158, identifier:rv; 159, call; 160, identifier:rv; 161, identifier:job; 162, identifier:fileStore; 163, string_content:sorted.bam; 164, string_content:sorted.bam.bai; 165, identifier:os; 166, identifier:path; 167, string_content:sort_by_ref.bam; 168, identifier:subprocess; 169, identifier:Popen; 170, list; 171, keyword_argument; 172, identifier:line; 173, identifier:startswith; 174, string:"@SQ"; 175, assignment; 176, assignment; 177, call; 178, string:"samtools"; 179, string:"view"; 180, string:"-b"; 181, identifier:sorted_bam; 182, identifier:chrom; 183, string:"samtools"; 184, string:"sort"; 185, string:"-m"; 186, string:"3000000000"; 187, string:"-n"; 188, string:"-"; 189, call; 190, attribute; 191, argument_list; 192, identifier:subprocess; 193, identifier:check_call; 194, identifier:cmd_sort; 195, keyword_argument; 196, attribute; 197, argument_list; 198, string_content:.bam; 199, identifier:job; 200, 
identifier:fileStore; 201, attribute; 202, argument_list; 203, attribute; 204, argument_list; 205, string:"samtools"; 206, string:"view"; 207, string:"-H"; 208, identifier:sorted_bam; 209, identifier:stdout; 210, attribute; 211, identifier:tmp; 212, call; 213, identifier:chrom; 214, subscript; 215, attribute; 216, argument_list; 217, attribute; 218, argument_list; 219, identifier:subprocess; 220, identifier:Popen; 221, identifier:cmd_view; 222, keyword_argument; 223, identifier:stdin; 224, attribute; 225, attribute; 226, identifier:join; 227, identifier:work_dir; 228, identifier:chrom; 229, identifier:job; 230, identifier:addChildJobFn; 231, identifier:transcriptome; 232, identifier:job_vars; 233, keyword_argument; 234, keyword_argument; 235, identifier:job; 236, identifier:addChildJobFn; 237, identifier:exon_count; 238, identifier:job_vars; 239, keyword_argument; 240, identifier:subprocess; 241, identifier:PIPE; 242, attribute; 243, argument_list; 244, call; 245, integer:1; 246, identifier:ref_seqs; 247, identifier:append; 248, identifier:chrom; 249, attribute; 250, identifier:join; 251, identifier:work_dir; 252, identifier:chrom; 253, identifier:stdout; 254, attribute; 255, identifier:p1; 256, identifier:stdout; 257, identifier:os; 258, identifier:path; 259, identifier:disk; 260, string; 261, identifier:memory; 262, string; 263, identifier:disk; 264, string; 265, identifier:line; 266, identifier:split; 267, string:"\t"; 268, attribute; 269, argument_list; 270, identifier:os; 271, identifier:path; 272, identifier:subprocess; 273, identifier:PIPE; 274, string_content:30 G; 275, string_content:30 G; 276, string_content:30 G; 277, subscript; 278, identifier:split; 279, string:":"; 280, identifier:tmp; 281, integer:1 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 7, 29; 9, 30; 10, 31; 12, 32; 13, 33; 15, 34; 16, 35; 17, 36; 17, 37; 17, 38; 18, 39; 20, 40; 20, 41; 20, 42; 20, 43; 21, 44; 22, 45; 23, 46; 25, 47; 26, 48; 27, 49; 28, 50; 30, 51; 30, 52; 31, 53; 31, 54; 32, 55; 32, 56; 33, 57; 33, 58; 34, 59; 34, 60; 35, 61; 35, 62; 38, 63; 39, 64; 39, 65; 43, 66; 43, 67; 43, 68; 43, 69; 44, 70; 44, 71; 45, 72; 45, 73; 46, 74; 46, 75; 47, 76; 47, 77; 48, 78; 48, 79; 49, 80; 49, 81; 50, 82; 50, 83; 51, 84; 51, 85; 54, 86; 54, 87; 55, 88; 55, 89; 56, 90; 56, 91; 58, 92; 58, 93; 62, 94; 62, 95; 63, 96; 63, 97; 64, 98; 64, 99; 66, 100; 67, 101; 68, 102; 69, 103; 71, 104; 71, 105; 73, 106; 73, 107; 74, 108; 74, 109; 75, 110; 76, 111; 76, 112; 77, 113; 77, 114; 79, 115; 79, 116; 81, 117; 81, 118; 86, 119; 86, 120; 91, 121; 91, 122; 91, 123; 91, 124; 91, 125; 92, 126; 92, 127; 93, 128; 93, 129; 94, 130; 94, 131; 96, 132; 96, 133; 97, 134; 97, 135; 97, 136; 100, 137; 100, 138; 101, 139; 101, 140; 102, 141; 102, 142; 103, 143; 103, 144; 104, 145; 104, 146; 105, 147; 105, 148; 106, 149; 106, 150; 106, 151; 106, 152; 112, 153; 113, 154; 113, 155; 114, 156; 115, 157; 115, 158; 117, 159; 117, 160; 119, 161; 119, 162; 124, 163; 125, 164; 126, 165; 126, 166; 129, 167; 130, 168; 130, 169; 131, 170; 131, 171; 132, 172; 132, 173; 133, 174; 134, 175; 135, 176; 136, 177; 138, 178; 138, 179; 138, 180; 138, 181; 138, 182; 140, 183; 140, 184; 140, 185; 140, 186; 140, 187; 140, 188; 140, 189; 142, 190; 142, 191; 143, 192; 143, 193; 144, 194; 144, 195; 145, 196; 145, 197; 146, 198; 154, 199; 154, 200; 157, 201; 157, 202; 159, 203; 159, 204; 170, 205; 170, 206; 170, 207; 170, 208; 
171, 209; 171, 210; 175, 211; 175, 212; 176, 213; 176, 214; 177, 215; 177, 216; 189, 217; 189, 218; 190, 219; 190, 220; 191, 221; 191, 222; 195, 223; 195, 224; 196, 225; 196, 226; 197, 227; 197, 228; 201, 229; 201, 230; 202, 231; 202, 232; 202, 233; 202, 234; 203, 235; 203, 236; 204, 237; 204, 238; 204, 239; 210, 240; 210, 241; 212, 242; 212, 243; 214, 244; 214, 245; 215, 246; 215, 247; 216, 248; 217, 249; 217, 250; 218, 251; 218, 252; 222, 253; 222, 254; 224, 255; 224, 256; 225, 257; 225, 258; 233, 259; 233, 260; 234, 261; 234, 262; 239, 263; 239, 264; 242, 265; 242, 266; 243, 267; 244, 268; 244, 269; 249, 270; 249, 271; 254, 272; 254, 273; 260, 274; 262, 275; 264, 276; 268, 277; 268, 278; 269, 279; 277, 280; 277, 281 | def sort_bam_by_reference(job, job_vars):
"""
Sorts the bam by reference
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
# I/O
sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
output = os.path.join(work_dir, 'sort_by_ref.bam')
# Call: Samtools
ref_seqs = []
handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout
for line in handle:
if line.startswith("@SQ"):
tmp = line.split("\t")
chrom = tmp[1].split(":")[1]
ref_seqs.append(chrom)
handle.close()
# Iterate through chromosomes to create mini-bams
for chrom in ref_seqs:
# job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam)
cmd_view = ["samtools", "view", "-b", sorted_bam, chrom]
cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)]
p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE)
subprocess.check_call(cmd_sort, stdin=p1.stdout)
sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs]
cmd = ["samtools", "cat", "-o", output] + sorted_files
subprocess.check_call(cmd)
# Write to FileStore
ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output)
rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv()
exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv()
return exon_id, rsem_id |
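The header loop above depends on the @SQ layout of a SAM header, where the second tab-separated field is SN:<reference name>. A standalone illustration of the same extraction on literal header text:

# Example lines as printed by `samtools view -H`.
header = [
    "@HD\tVN:1.4\tSO:coordinate",
    "@SQ\tSN:chr1\tLN:248956422",
    "@SQ\tSN:chr2\tLN:242193529",
]

ref_seqs = []
for line in header:
    if line.startswith("@SQ"):
        tmp = line.split("\t")
        # The second field is 'SN:<name>'; keep only the name.
        ref_seqs.append(tmp[1].split(":")[1])

print(ref_seqs)  # ['chr1', 'chr2']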
0, module; 1, function_definition; 2, function_name:main; 3, parameters; 4, block; 5, expression_statement; 6, comment:# Define Parser object and add to toil; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, comment:# Store inputs from argparse; 11, expression_statement; 12, comment:# Launch jobs; 13, expression_statement; 14, comment:"""
This is a Toil pipeline for the UNC best practice RNA-Seq analysis.
RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified.
Please read the README.md located in the same directory.
"""; 15, assignment; 16, call; 17, assignment; 18, assignment; 19, call; 20, identifier:parser; 21, call; 22, attribute; 23, argument_list; 24, identifier:args; 25, call; 26, identifier:inputs; 27, dictionary; 28, attribute; 29, argument_list; 30, identifier:build_parser; 31, argument_list; 32, attribute; 33, identifier:addToilOptions; 34, identifier:parser; 35, attribute; 36, argument_list; 37, pair; 38, pair; 39, pair; 40, pair; 41, pair; 42, pair; 43, pair; 44, pair; 45, pair; 46, pair; 47, pair; 48, pair; 49, pair; 50, pair; 51, pair; 52, pair; 53, pair; 54, pair; 55, pair; 56, attribute; 57, identifier:startToil; 58, call; 59, identifier:args; 60, identifier:Job; 61, identifier:Runner; 62, identifier:parser; 63, identifier:parse_args; 64, string; 65, attribute; 66, string; 67, attribute; 68, string; 69, attribute; 70, string; 71, attribute; 72, string; 73, attribute; 74, string; 75, attribute; 76, string; 77, attribute; 78, string; 79, attribute; 80, string; 81, attribute; 82, string; 83, attribute; 84, string; 85, attribute; 86, string; 87, attribute; 88, string; 89, attribute; 90, string; 91, attribute; 92, string; 93, attribute; 94, string; 95, attribute; 96, string; 97, None; 98, string; 99, None; 100, string; 101, None; 102, identifier:Job; 103, identifier:Runner; 104, attribute; 105, argument_list; 106, string_content:config; 107, identifier:args; 108, identifier:config; 109, string_content:config_fastq; 110, identifier:args; 111, identifier:config_fastq; 112, string_content:input; 113, identifier:args; 114, identifier:input; 115, string_content:unc.bed; 116, identifier:args; 117, identifier:unc; 118, string_content:hg19.transcripts.fa; 119, identifier:args; 120, identifier:fasta; 121, string_content:composite_exons.bed; 122, identifier:args; 123, identifier:composite_exons; 124, string_content:normalize.pl; 125, identifier:args; 126, identifier:normalize; 127, string_content:output_dir; 128, identifier:args; 129, identifier:output_dir; 130, string_content:rsem_ref.zip; 131, identifier:args; 132, identifier:rsem_ref; 133, string_content:chromosomes.zip; 134, identifier:args; 135, identifier:chromosomes; 136, string_content:ebwt.zip; 137, identifier:args; 138, identifier:ebwt; 139, string_content:ssec; 140, identifier:args; 141, identifier:ssec; 142, string_content:s3_dir; 143, identifier:args; 144, identifier:s3_dir; 145, string_content:sudo; 146, identifier:args; 147, identifier:sudo; 148, string_content:single_end_reads; 149, identifier:args; 150, identifier:single_end_reads; 151, string_content:upload_bam_to_s3; 152, identifier:args; 153, identifier:upload_bam_to_s3; 154, string_content:uuid; 155, string_content:sample.tar; 156, string_content:cpu_count; 157, identifier:Job; 158, identifier:wrapJobFn; 159, identifier:download_shared_files; 160, identifier:inputs | 0, 1; 1, 2; 1, 3; 1, 4; 4, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 5, 14; 7, 15; 8, 16; 9, 17; 11, 18; 13, 19; 15, 20; 15, 21; 16, 22; 16, 23; 17, 24; 17, 25; 18, 26; 18, 27; 19, 28; 19, 29; 21, 30; 21, 31; 22, 32; 22, 33; 23, 34; 25, 35; 25, 36; 27, 37; 27, 38; 27, 39; 27, 40; 27, 41; 27, 42; 27, 43; 27, 44; 27, 45; 27, 46; 27, 47; 27, 48; 27, 49; 27, 50; 27, 51; 27, 52; 27, 53; 27, 54; 27, 55; 28, 56; 28, 57; 29, 58; 29, 59; 32, 60; 32, 61; 35, 62; 35, 63; 37, 64; 37, 65; 38, 66; 38, 67; 39, 68; 39, 69; 40, 70; 40, 71; 41, 72; 41, 73; 42, 74; 42, 75; 43, 76; 43, 77; 44, 78; 44, 79; 45, 80; 45, 81; 46, 82; 46, 83; 47, 84; 47, 85; 48, 86; 48, 87; 49, 88; 49, 89; 50, 90; 50, 91; 51, 92; 51, 93; 52, 94; 
52, 95; 53, 96; 53, 97; 54, 98; 54, 99; 55, 100; 55, 101; 56, 102; 56, 103; 58, 104; 58, 105; 64, 106; 65, 107; 65, 108; 66, 109; 67, 110; 67, 111; 68, 112; 69, 113; 69, 114; 70, 115; 71, 116; 71, 117; 72, 118; 73, 119; 73, 120; 74, 121; 75, 122; 75, 123; 76, 124; 77, 125; 77, 126; 78, 127; 79, 128; 79, 129; 80, 130; 81, 131; 81, 132; 82, 133; 83, 134; 83, 135; 84, 136; 85, 137; 85, 138; 86, 139; 87, 140; 87, 141; 88, 142; 89, 143; 89, 144; 90, 145; 91, 146; 91, 147; 92, 148; 93, 149; 93, 150; 94, 151; 95, 152; 95, 153; 96, 154; 98, 155; 100, 156; 104, 157; 104, 158; 105, 159; 105, 160 | def main():
"""
This is a Toil pipeline for the UNC best practice RNA-Seq analysis.
RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified.
Please read the README.md located in the same directory.
"""
# Define Parser object and add to toil
parser = build_parser()
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Store inputs from argparse
inputs = {'config': args.config,
'config_fastq': args.config_fastq,
'input': args.input,
'unc.bed': args.unc,
'hg19.transcripts.fa': args.fasta,
'composite_exons.bed': args.composite_exons,
'normalize.pl': args.normalize,
'output_dir': args.output_dir,
'rsem_ref.zip': args.rsem_ref,
'chromosomes.zip': args.chromosomes,
'ebwt.zip': args.ebwt,
'ssec': args.ssec,
's3_dir': args.s3_dir,
'sudo': args.sudo,
'single_end_reads': args.single_end_reads,
'upload_bam_to_s3': args.upload_bam_to_s3,
'uuid': None,
'sample.tar': None,
'cpu_count': None}
# Launch jobs
Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args) |
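build_parser() is defined elsewhere in the pipeline and is not shown here. The sketch below is a hypothetical reconstruction of the kind of argparse parser main() expects; only a handful of the options read from args are included, and their exact flags and defaults are assumptions:

import argparse

def build_parser():
    # Hypothetical sketch only -- not the pipeline's actual build_parser().
    parser = argparse.ArgumentParser(description='UNC best practice RNA-Seq pipeline')
    parser.add_argument('--config', required=True, help='Path to the sample config file')
    parser.add_argument('--output_dir', default=None, help='Local directory for results')
    parser.add_argument('--s3_dir', default=None, help='S3 directory (bucket/prefix) for uploads')
    parser.add_argument('--ssec', default=None, help='Path to the SSE-C encryption key')
    parser.add_argument('--sudo', action='store_true', help='Run docker with sudo')
    return parser

args = build_parser().parse_args(['--config', 'samples.csv'])
print(args.config, args.output_dir, args.sudo)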
0, module; 1, function_definition; 2, function_name:fourier_series; 3, parameters; 4, block; 5, identifier:self; 6, identifier:pars; 7, identifier:x; 8, identifier:order; 9, expression_statement; 10, expression_statement; 11, for_statement; 12, return_statement; 13, comment:"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""; 14, assignment; 15, identifier:i; 16, call; 17, block; 18, identifier:sum; 19, identifier:sum; 20, subscript; 21, identifier:range; 22, argument_list; 23, expression_statement; 24, identifier:pars; 25, integer:0; 26, identifier:order; 27, augmented_assignment; 28, identifier:sum; 29, binary_operator:pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x); 30, binary_operator:pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x); 31, line_continuation:\; 32, binary_operator:pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x); 33, subscript; 34, call; 35, subscript; 36, call; 37, identifier:pars; 38, binary_operator:i * 2 + 1; 39, attribute; 40, argument_list; 41, identifier:pars; 42, binary_operator:i * 2 + 2; 43, attribute; 44, argument_list; 45, binary_operator:i * 2; 46, integer:1; 47, identifier:np; 48, identifier:sin; 49, binary_operator:2 * np.pi * (i + 1) * x; 50, binary_operator:i * 2; 51, integer:2; 52, identifier:np; 53, identifier:cos; 54, binary_operator:2 * np.pi * (i + 1) * x; 55, identifier:i; 56, integer:2; 57, binary_operator:2 * np.pi * (i + 1); 58, identifier:x; 59, identifier:i; 60, integer:2; 61, binary_operator:2 * np.pi * (i + 1); 62, identifier:x; 63, binary_operator:2 * np.pi; 64, parenthesized_expression; 65, binary_operator:2 * np.pi; 66, parenthesized_expression; 67, integer:2; 68, attribute; 69, binary_operator:i + 1; 70, integer:2; 71, attribute; 72, binary_operator:i + 1; 73, identifier:np; 74, identifier:pi; 75, identifier:i; 76, integer:1; 77, identifier:np; 78, identifier:pi; 79, identifier:i; 80, integer:1 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 9, 13; 10, 14; 11, 15; 11, 16; 11, 17; 12, 18; 14, 19; 14, 20; 16, 21; 16, 22; 17, 23; 20, 24; 20, 25; 22, 26; 23, 27; 27, 28; 27, 29; 29, 30; 29, 31; 29, 32; 30, 33; 30, 34; 32, 35; 32, 36; 33, 37; 33, 38; 34, 39; 34, 40; 35, 41; 35, 42; 36, 43; 36, 44; 38, 45; 38, 46; 39, 47; 39, 48; 40, 49; 42, 50; 42, 51; 43, 52; 43, 53; 44, 54; 45, 55; 45, 56; 49, 57; 49, 58; 50, 59; 50, 60; 54, 61; 54, 62; 57, 63; 57, 64; 61, 65; 61, 66; 63, 67; 63, 68; 64, 69; 65, 70; 65, 71; 66, 72; 68, 73; 68, 74; 69, 75; 69, 76; 71, 77; 71, 78; 72, 79; 72, 80 | def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum |
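A short numeric check of the evaluation above (the method is repeated as a free function so the snippet runs standalone; np is numpy, as in the original):

import numpy as np

def fourier_series(pars, x, order):
    # pars[0] is the constant term, followed by (sin, cos) coefficient pairs per harmonic.
    total = pars[0]
    for i in range(order):
        total += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
            + pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
    return total

phase = np.array([0.0, 0.25, 0.5])
pars = [1.0, 0.5, 0.2]  # constant, sin amplitude, cos amplitude of the first harmonic
print(fourier_series(pars, phase, order=1))
# At phase 0.0 this equals 1.0 + 0.5*sin(0) + 0.2*cos(0) = 1.2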
0, module; 1, function_definition; 2, function_name:slope_percentile; 3, parameters; 4, block; 5, identifier:self; 6, identifier:date; 7, identifier:mag; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, comment:# Remove zero mag_diff.; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, comment:# Derive slope.; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, return_statement; 20, comment:"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""; 21, assignment; 22, assignment; 23, assignment; 24, assignment; 25, assignment; 26, assignment; 27, assignment; 28, assignment; 29, expression_list; 30, identifier:date_diff; 31, binary_operator:date[1:] - date[:len(date) - 1]; 32, identifier:mag_diff; 33, binary_operator:mag[1:] - mag[:len(mag) - 1]; 34, identifier:index; 35, call; 36, identifier:date_diff; 37, subscript; 38, identifier:mag_diff; 39, subscript; 40, identifier:slope; 41, binary_operator:date_diff / mag_diff; 42, identifier:percentile_10; 43, call; 44, identifier:percentile_90; 45, call; 46, identifier:percentile_10; 47, identifier:percentile_90; 48, subscript; 49, subscript; 50, subscript; 51, subscript; 52, attribute; 53, argument_list; 54, identifier:date_diff; 55, identifier:index; 56, identifier:mag_diff; 57, identifier:index; 58, identifier:date_diff; 59, identifier:mag_diff; 60, attribute; 61, argument_list; 62, attribute; 63, argument_list; 64, identifier:date; 65, slice; 66, identifier:date; 67, slice; 68, identifier:mag; 69, slice; 70, identifier:mag; 71, slice; 72, identifier:np; 73, identifier:where; 74, comparison_operator:mag_diff != 0.; 75, identifier:np; 76, identifier:percentile; 77, identifier:slope; 78, float:10.; 79, identifier:np; 80, identifier:percentile; 81, identifier:slope; 82, float:90.; 83, integer:1; 84, binary_operator:len(date) - 1; 85, integer:1; 86, binary_operator:len(mag) - 1; 87, identifier:mag_diff; 88, float:0.; 89, call; 90, integer:1; 91, call; 92, integer:1; 93, identifier:len; 94, argument_list; 95, identifier:len; 96, argument_list; 97, identifier:date; 98, identifier:mag | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 8, 20; 9, 21; 10, 22; 12, 23; 13, 24; 14, 25; 16, 26; 17, 27; 18, 28; 19, 29; 21, 30; 21, 31; 22, 32; 22, 33; 23, 34; 23, 35; 24, 36; 24, 37; 25, 38; 25, 39; 26, 40; 26, 41; 27, 42; 27, 43; 28, 44; 28, 45; 29, 46; 29, 47; 31, 48; 31, 49; 33, 50; 33, 51; 35, 52; 35, 53; 37, 54; 37, 55; 39, 56; 39, 57; 41, 58; 41, 59; 43, 60; 43, 61; 45, 62; 45, 63; 48, 64; 48, 65; 49, 66; 49, 67; 50, 68; 50, 69; 51, 70; 51, 71; 52, 72; 52, 73; 53, 74; 60, 75; 60, 76; 61, 77; 61, 78; 62, 79; 62, 80; 63, 81; 63, 82; 65, 83; 67, 84; 69, 85; 71, 86; 74, 87; 74, 88; 84, 89; 84, 90; 86, 91; 86, 92; 89, 93; 89, 94; 91, 95; 91, 96; 94, 97; 96, 98 | def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90 |
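A small usage sketch on synthetic phase-folded data; the computation is repeated inline (without self) so it runs standalone, with np standing for numpy as above:

import numpy as np

np.random.seed(0)
date = np.sort(np.random.uniform(0., 1., 50))       # phase-folded dates, sorted
mag = 15. + 0.3 * np.sin(2. * np.pi * date) \
    + np.random.normal(0., 0.01, date.size)         # magnitudes ordered by date

date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
index = np.where(mag_diff != 0.)                     # drop flat segments
slope = date_diff[index] / mag_diff[index]

# The feature pair slope_percentile(date, mag) would return.
print(np.percentile(slope, 10.), np.percentile(slope, 90.))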
0, module; 1, function_definition; 2, function_name:number_aware_alphabetical_cmp; 3, parameters; 4, block; 5, identifier:str1; 6, identifier:str2; 7, expression_statement; 8, function_definition; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, while_statement; 14, if_statement; 15, return_statement; 16, comment:""" cmp function for sorting a list of strings by alphabetical order, but with
numbers sorted numerically.
i.e., foo1, foo2, foo10, foo11
instead of foo1, foo10, foo11, foo2
"""; 17, function_name:flatten_tokens; 18, parameters; 19, block; 20, assignment; 21, assignment; 22, assignment; 23, assignment; 24, comparison_operator:i < l; 25, block; 26, comparison_operator:len(seq1) < len(seq2); 27, block; 28, elif_clause; 29, integer:0; 30, identifier:tokens; 31, expression_statement; 32, for_statement; 33, return_statement; 34, identifier:seq1; 35, call; 36, identifier:seq2; 37, call; 38, identifier:l; 39, call; 40, identifier:i; 41, integer:0; 42, identifier:i; 43, identifier:l; 44, if_statement; 45, expression_statement; 46, call; 47, call; 48, return_statement; 49, comparison_operator:len(seq1) > len(seq2); 50, block; 51, assignment; 52, identifier:token; 53, identifier:tokens; 54, block; 55, identifier:l; 56, identifier:flatten_tokens; 57, argument_list; 58, identifier:flatten_tokens; 59, argument_list; 60, identifier:min; 61, argument_list; 62, comparison_operator:seq1[i] < seq2[i]; 63, block; 64, elif_clause; 65, augmented_assignment; 66, identifier:len; 67, argument_list; 68, identifier:len; 69, argument_list; 70, unary_operator; 71, call; 72, call; 73, return_statement; 74, identifier:l; 75, list; 76, if_statement; 77, call; 78, call; 79, call; 80, call; 81, subscript; 82, subscript; 83, return_statement; 84, comparison_operator:seq1[i] > seq2[i]; 85, block; 86, identifier:i; 87, integer:1; 88, identifier:seq1; 89, identifier:seq2; 90, integer:1; 91, identifier:len; 92, argument_list; 93, identifier:len; 94, argument_list; 95, integer:1; 96, call; 97, block; 98, else_clause; 99, identifier:tokenize_by_number; 100, argument_list; 101, identifier:tokenize_by_number; 102, argument_list; 103, identifier:len; 104, argument_list; 105, identifier:len; 106, argument_list; 107, identifier:seq1; 108, identifier:i; 109, identifier:seq2; 110, identifier:i; 111, unary_operator; 112, subscript; 113, subscript; 114, return_statement; 115, identifier:seq1; 116, identifier:seq2; 117, identifier:isinstance; 118, argument_list; 119, for_statement; 120, block; 121, identifier:str1; 122, identifier:str2; 123, identifier:seq1; 124, identifier:seq2; 125, integer:1; 126, identifier:seq1; 127, identifier:i; 128, identifier:seq2; 129, identifier:i; 130, integer:1; 131, identifier:token; 132, identifier:str; 133, identifier:char; 134, identifier:token; 135, block; 136, assert_statement; 137, expression_statement; 138, expression_statement; 139, call; 140, call; 141, call; 142, identifier:isinstance; 143, argument_list; 144, attribute; 145, argument_list; 146, attribute; 147, argument_list; 148, identifier:token; 149, identifier:float; 150, identifier:l; 151, identifier:append; 152, identifier:token; 153, identifier:l; 154, identifier:append; 155, identifier:char | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 7, 16; 8, 17; 8, 18; 8, 19; 9, 20; 10, 21; 11, 22; 12, 23; 13, 24; 13, 25; 14, 26; 14, 27; 14, 28; 15, 29; 18, 30; 19, 31; 19, 32; 19, 33; 20, 34; 20, 35; 21, 36; 21, 37; 22, 38; 22, 39; 23, 40; 23, 41; 24, 42; 24, 43; 25, 44; 25, 45; 26, 46; 26, 47; 27, 48; 28, 49; 28, 50; 31, 51; 32, 52; 32, 53; 32, 54; 33, 55; 35, 56; 35, 57; 37, 58; 37, 59; 39, 60; 39, 61; 44, 62; 44, 63; 44, 64; 45, 65; 46, 66; 46, 67; 47, 68; 47, 69; 48, 70; 49, 71; 49, 72; 50, 73; 51, 74; 51, 75; 54, 76; 57, 77; 59, 78; 61, 79; 61, 80; 62, 81; 62, 82; 63, 83; 64, 84; 64, 85; 65, 86; 65, 87; 67, 88; 69, 89; 70, 90; 71, 91; 71, 92; 72, 93; 72, 94; 73, 95; 76, 96; 76, 97; 76, 98; 77, 99; 77, 100; 78, 101; 78, 102; 79, 103; 79, 104; 80, 105; 80, 106; 81, 107; 
81, 108; 82, 109; 82, 110; 83, 111; 84, 112; 84, 113; 85, 114; 92, 115; 94, 116; 96, 117; 96, 118; 97, 119; 98, 120; 100, 121; 102, 122; 104, 123; 106, 124; 111, 125; 112, 126; 112, 127; 113, 128; 113, 129; 114, 130; 118, 131; 118, 132; 119, 133; 119, 134; 119, 135; 120, 136; 120, 137; 135, 138; 136, 139; 137, 140; 138, 141; 139, 142; 139, 143; 140, 144; 140, 145; 141, 146; 141, 147; 143, 148; 143, 149; 144, 150; 144, 151; 145, 152; 146, 153; 146, 154; 147, 155 | def number_aware_alphabetical_cmp(str1, str2):
""" cmp function for sorting a list of strings by alphabetical order, but with
numbers sorted numerically.
i.e., foo1, foo2, foo10, foo11
instead of foo1, foo10, foo11, foo2
"""
def flatten_tokens(tokens):
l = []
for token in tokens:
if isinstance(token, str):
for char in token:
l.append(char)
else:
assert isinstance(token, float)
l.append(token)
return l
seq1 = flatten_tokens(tokenize_by_number(str1))
seq2 = flatten_tokens(tokenize_by_number(str2))
l = min(len(seq1),len(seq2))
i = 0
while i < l:
if seq1[i] < seq2[i]:
return -1
elif seq1[i] > seq2[i]:
return 1
i += 1
if len(seq1) < len(seq2):
return -1
elif len(seq1) > len(seq2):
return 1
return 0 |
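tokenize_by_number() lives elsewhere in the module; the sketch below supplies a minimal stand-in (an assumption, not the original helper) so the comparator defined above can be exercised, and shows how a cmp-style function plugs into Python 3 sorting through functools.cmp_to_key:

import functools
import re

def tokenize_by_number(s):
    # Minimal stand-in: split into text chunks and float tokens, e.g. 'foo10' -> ['foo', 10.0].
    tokens = []
    for part in re.split(r'(\d+)', s):
        if part:
            tokens.append(float(part) if part.isdigit() else part)
    return tokens

names = ['foo10', 'foo2', 'foo1', 'foo11']
print(sorted(names, key=functools.cmp_to_key(number_aware_alphabetical_cmp)))
# ['foo1', 'foo2', 'foo10', 'foo11']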
0, module; 1, function_definition; 2, function_name:sort_members; 3, parameters; 4, block; 5, identifier:tup; 6, identifier:names; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, comment:"""Return two pairs of members, scalar and tuple members.
The scalars will be sorted s.t. the unbound members are at the top.
"""; 12, assignment; 13, assignment; 14, expression_list; 15, pattern_list; 16, call; 17, pattern_list; 18, call; 19, binary_operator:usorted(unbound) + usorted(bound); 20, call; 21, identifier:scalars; 22, identifier:tuples; 23, identifier:partition; 24, argument_list; 25, identifier:unbound; 26, identifier:bound; 27, identifier:partition; 28, argument_list; 29, call; 30, call; 31, identifier:usorted; 32, argument_list; 33, lambda; 34, identifier:names; 35, lambda; 36, identifier:scalars; 37, identifier:usorted; 38, argument_list; 39, identifier:usorted; 40, argument_list; 41, identifier:tuples; 42, lambda_parameters; 43, not_operator; 44, lambda_parameters; 45, call; 46, identifier:unbound; 47, identifier:bound; 48, identifier:x; 49, call; 50, identifier:x; 51, attribute; 52, argument_list; 53, identifier:is_tuple_node; 54, argument_list; 55, attribute; 56, identifier:is_unbound; 57, attribute; 58, subscript; 59, identifier:value; 60, subscript; 61, identifier:value; 62, attribute; 63, identifier:x; 64, attribute; 65, identifier:x; 66, identifier:tup; 67, identifier:member; 68, identifier:tup; 69, identifier:member | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 10, 14; 12, 15; 12, 16; 13, 17; 13, 18; 14, 19; 14, 20; 15, 21; 15, 22; 16, 23; 16, 24; 17, 25; 17, 26; 18, 27; 18, 28; 19, 29; 19, 30; 20, 31; 20, 32; 24, 33; 24, 34; 28, 35; 28, 36; 29, 37; 29, 38; 30, 39; 30, 40; 32, 41; 33, 42; 33, 43; 35, 44; 35, 45; 38, 46; 40, 47; 42, 48; 43, 49; 44, 50; 45, 51; 45, 52; 49, 53; 49, 54; 51, 55; 51, 56; 54, 57; 55, 58; 55, 59; 57, 60; 57, 61; 58, 62; 58, 63; 60, 64; 60, 65; 62, 66; 62, 67; 64, 68; 64, 69 | def sort_members(tup, names):
"""Return two pairs of members, scalar and tuple members.
The scalars will be sorted s.t. the unbound members are at the top.
"""
scalars, tuples = partition(lambda x: not is_tuple_node(tup.member[x].value), names)
unbound, bound = partition(lambda x: tup.member[x].value.is_unbound(), scalars)
return usorted(unbound) + usorted(bound), usorted(tuples) |
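partition(), usorted() and is_tuple_node() are helpers from the surrounding module and are not shown here. The sketch below assumes a plausible partition() (predicate matches first, order preserved) and uses plain sorted() in place of usorted(), just to illustrate the unbound-scalars-first layout:

def partition(pred, items):
    # Assumed semantics: split items into (matching, non-matching), preserving order.
    yes, no = [], []
    for item in items:
        (yes if pred(item) else no).append(item)
    return yes, no

# Toy stand-in for the member table: True marks an unbound scalar member.
members = {'width': True, 'name': False, 'height': True}
unbound, bound = partition(lambda x: members[x], members)
print(sorted(unbound) + sorted(bound))  # ['height', 'width', 'name']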
0, module; 1, function_definition; 2, function_name:sort_schemas; 3, parameters; 4, block; 5, identifier:schemas; 6, expression_statement; 7, function_definition; 8, return_statement; 9, comment:"""Sort a list of SQL schemas in order"""; 10, function_name:keyfun; 11, parameters; 12, block; 13, call; 14, identifier:v; 15, expression_statement; 16, comment:# x3: 'DEV' should come before ''; 17, return_statement; 18, identifier:sorted; 19, argument_list; 20, assignment; 21, tuple; 22, identifier:schemas; 23, keyword_argument; 24, identifier:x; 25, call; 26, call; 27, subscript; 28, conditional_expression:int(x[2]) if x[2] else None; 29, conditional_expression:x[3] if x[3] else 'zzz'; 30, call; 31, identifier:key; 32, identifier:keyfun; 33, attribute; 34, argument_list; 35, identifier:int; 36, argument_list; 37, identifier:x; 38, integer:1; 39, call; 40, subscript; 41, None; 42, subscript; 43, subscript; 44, string; 45, identifier:int; 46, argument_list; 47, call; 48, identifier:groups; 49, subscript; 50, identifier:int; 51, argument_list; 52, identifier:x; 53, integer:2; 54, identifier:x; 55, integer:3; 56, identifier:x; 57, integer:3; 58, string_content:zzz; 59, subscript; 60, attribute; 61, argument_list; 62, identifier:x; 63, integer:0; 64, subscript; 65, identifier:x; 66, integer:4; 67, identifier:SQL_SCHEMA_REGEXP; 68, identifier:match; 69, identifier:v; 70, identifier:x; 71, integer:2 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 6, 9; 7, 10; 7, 11; 7, 12; 8, 13; 11, 14; 12, 15; 12, 16; 12, 17; 13, 18; 13, 19; 15, 20; 17, 21; 19, 22; 19, 23; 20, 24; 20, 25; 21, 26; 21, 27; 21, 28; 21, 29; 21, 30; 23, 31; 23, 32; 25, 33; 25, 34; 26, 35; 26, 36; 27, 37; 27, 38; 28, 39; 28, 40; 28, 41; 29, 42; 29, 43; 29, 44; 30, 45; 30, 46; 33, 47; 33, 48; 36, 49; 39, 50; 39, 51; 40, 52; 40, 53; 42, 54; 42, 55; 43, 56; 43, 57; 44, 58; 46, 59; 47, 60; 47, 61; 49, 62; 49, 63; 51, 64; 59, 65; 59, 66; 60, 67; 60, 68; 61, 69; 64, 70; 64, 71 | def sort_schemas(schemas):
"""Sort a list of SQL schemas in order"""
def keyfun(v):
x = SQL_SCHEMA_REGEXP.match(v).groups()
# x3: 'DEV' should come before ''
return (int(x[0]), x[1], int(x[2]) if x[2] else None,
x[3] if x[3] else 'zzz', int(x[4]))
return sorted(schemas, key=keyfun) |
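The interesting part of keyfun() is how it normalises the regex groups before comparison: numeric fields are cast to int (or left as None when absent) and an empty fourth group becomes 'zzz', which is what pushes a 'DEV' marker ahead of the unmarked name. SQL_SCHEMA_REGEXP itself is defined elsewhere, so the sketch below applies the same normalisation to hand-written group tuples whose meanings are only illustrative guesses:

# Hand-written stand-ins for the five regex groups keyfun() receives.
groups = [
    ('2', 'core', '1', '', '3'),
    ('2', 'core', '1', 'DEV', '1'),
    ('10', 'core', None, '', '0'),
]

def keyfun_from_groups(x):
    # Same normalisation as keyfun() above.
    return (int(x[0]), x[1], int(x[2]) if x[2] else None,
            x[3] if x[3] else 'zzz', int(x[4]))

for g in sorted(groups, key=keyfun_from_groups):
    print(g)
# 'DEV' sorts before '', and int() keeps 10 after 2 (string comparison would reverse that).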
0, module; 1, function_definition; 2, function_name:sort_nodes; 3, parameters; 4, block; 5, identifier:dom; 6, identifier:cmp_func; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, comment:"""
Sort the nodes of the dom in-place, based on a comparison function.
"""; 11, call; 12, identifier:node; 13, call; 14, block; 15, attribute; 16, argument_list; 17, identifier:list; 18, argument_list; 19, expression_statement; 20, while_statement; 21, identifier:dom; 22, identifier:normalize; 23, call; 24, assignment; 25, boolean_operator; 26, block; 27, identifier:walk_dom; 28, argument_list; 29, identifier:prev_sib; 30, attribute; 31, identifier:prev_sib; 32, comparison_operator:cmp_func(prev_sib, node) == 1; 33, expression_statement; 34, expression_statement; 35, identifier:dom; 36, keyword_argument; 37, identifier:node; 38, identifier:previousSibling; 39, call; 40, integer:1; 41, call; 42, assignment; 43, identifier:elements_only; 44, True; 45, identifier:cmp_func; 46, argument_list; 47, attribute; 48, argument_list; 49, identifier:prev_sib; 50, attribute; 51, identifier:prev_sib; 52, identifier:node; 53, attribute; 54, identifier:insertBefore; 55, identifier:node; 56, identifier:prev_sib; 57, identifier:node; 58, identifier:previousSibling; 59, identifier:node; 60, identifier:parentNode | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 7, 10; 8, 11; 9, 12; 9, 13; 9, 14; 11, 15; 11, 16; 13, 17; 13, 18; 14, 19; 14, 20; 15, 21; 15, 22; 18, 23; 19, 24; 20, 25; 20, 26; 23, 27; 23, 28; 24, 29; 24, 30; 25, 31; 25, 32; 26, 33; 26, 34; 28, 35; 28, 36; 30, 37; 30, 38; 32, 39; 32, 40; 33, 41; 34, 42; 36, 43; 36, 44; 39, 45; 39, 46; 41, 47; 41, 48; 42, 49; 42, 50; 46, 51; 46, 52; 47, 53; 47, 54; 48, 55; 48, 56; 50, 57; 50, 58; 53, 59; 53, 60 | def sort_nodes(dom, cmp_func):
"""
Sort the nodes of the dom in-place, based on a comparison function.
"""
dom.normalize()
for node in list(walk_dom(dom, elements_only=True)):
prev_sib = node.previousSibling
while prev_sib and cmp_func(prev_sib, node) == 1:
node.parentNode.insertBefore(node, prev_sib)
prev_sib = node.previousSibling |
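walk_dom() is a helper from the same module (assumed to yield the document's element nodes). The standalone sketch below applies the same insertBefore-based insertion sort to the children of a single element, using tag names as the ordering, so the bubbling loop can be seen end to end:

from xml.dom import minidom

def tag_cmp(a, b):
    # cmp-style comparator: returns 1 when the first node should come after the second.
    return (a.tagName > b.tagName) - (a.tagName < b.tagName)

dom = minidom.parseString('<root><c/><a/><b/></root>')
dom.normalize()
root = dom.documentElement

for node in list(root.childNodes):
    prev_sib = node.previousSibling
    # Bubble the node towards the front while the previous sibling compares greater,
    # exactly as in sort_nodes() above.
    while prev_sib and tag_cmp(prev_sib, node) == 1:
        node.parentNode.insertBefore(node, prev_sib)
        prev_sib = node.previousSibling

print(root.toxml())  # <root><a/><b/><c/></root>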
0, module; 1, function_definition; 2, function_name:currentdir; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, identifier:str; 8, expression_statement; 9, if_statement; 10, return_statement; 11, comment:"""Name of the current working directory containing the relevant files.
To show most of the functionality of |property|
|FileManager.currentdir| (unpacking zip files on the fly is
explained in the documentation on function
(|FileManager.zip_currentdir|), we first prepare a |FileManager|
object corresponding to the |FileManager.basepath|
`projectname/basename`:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename')
... repr_(filemanager.basepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename'
At first, the base directory is empty and asking for the
current working directory results in the following error:
>>> with TestIO():
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: \
`.../projectname/basename` does not contain any available directories.
If only one directory exists, it is considered as the current
working directory automatically:
>>> with TestIO():
... os.mkdir('projectname/basename/dir1')
... filemanager.currentdir
'dir1'
|property| |FileManager.currentdir| memorises the name of the
current working directory, even if another directory is later
added to the base path:
>>> with TestIO():
... os.mkdir('projectname/basename/dir2')
... filemanager.currentdir
'dir1'
Set the value of |FileManager.currentdir| to |None| to let it
forget the memorised directory. After that, asking for the
current working directory now results in another error, as
it is not clear which directory to select:
>>> with TestIO():
... filemanager.currentdir = None
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: \
`....../projectname/basename` does contain multiple available directories \
(dir1 and dir2).
Setting |FileManager.currentdir| manually solves the problem:
>>> with TestIO():
... filemanager.currentdir = 'dir1'
... filemanager.currentdir
'dir1'
Remove the current working directory `dir1` with the `del` statement:
>>> with TestIO():
... del filemanager.currentdir
... os.path.exists('projectname/basename/dir1')
False
|FileManager| subclasses can define a default directory name.
When many directories exist and none is selected manually, the
default directory is selected automatically. The following
example shows an error message due to multiple directories
without any having the default name:
>>> with TestIO():
... os.mkdir('projectname/basename/dir1')
... filemanager.DEFAULTDIR = 'dir3'
... del filemanager.currentdir
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: The \
default directory (dir3) is not among the available directories (dir1 and dir2).
We can fix this by adding the required default directory manually:
>>> with TestIO():
... os.mkdir('projectname/basename/dir3')
... filemanager.currentdir
'dir3'
Setting the |FileManager.currentdir| to `dir4` not only overwrites
the default name, but also creates the required folder:
>>> with TestIO():
... filemanager.currentdir = 'dir4'
... filemanager.currentdir
'dir4'
>>> with TestIO():
... sorted(os.listdir('projectname/basename'))
['dir1', 'dir2', 'dir3', 'dir4']
Failed attempts to remove directories result in error messages
like the following one:
>>> import shutil
>>> from unittest.mock import patch
>>> with patch.object(shutil, 'rmtree', side_effect=AttributeError):
... with TestIO():
... del filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: While trying to delete the current working directory \
`.../projectname/basename/dir4` of the FileManager object, the following \
error occurred: ...
Then, the current working directory still exists and is remembered
by |FileManager.currentdir|:
>>> with TestIO():
... filemanager.currentdir
'dir4'
>>> with TestIO():
... sorted(os.listdir('projectname/basename'))
['dir1', 'dir2', 'dir3', 'dir4']
"""; 12, comparison_operator:self._currentdir is None; 13, block; 14, attribute; 15, attribute; 16, None; 17, expression_statement; 18, if_statement; 19, identifier:self; 20, identifier:_currentdir; 21, identifier:self; 22, identifier:_currentdir; 23, assignment; 24, comparison_operator:len(directories) == 1; 25, block; 26, elif_clause; 27, else_clause; 28, identifier:directories; 29, attribute; 30, call; 31, integer:1; 32, expression_statement; 33, comparison_operator:self.DEFAULTDIR in directories; 34, block; 35, block; 36, attribute; 37, identifier:folders; 38, identifier:len; 39, argument_list; 40, assignment; 41, attribute; 42, identifier:directories; 43, expression_statement; 44, expression_statement; 45, if_statement; 46, if_statement; 47, raise_statement; 48, identifier:self; 49, identifier:availabledirs; 50, identifier:directories; 51, attribute; 52, subscript; 53, identifier:self; 54, identifier:DEFAULTDIR; 55, assignment; 56, assignment; 57, not_operator; 58, block; 59, comparison_operator:self.DEFAULTDIR is None; 60, block; 61, call; 62, identifier:self; 63, identifier:currentdir; 64, identifier:directories; 65, integer:0; 66, attribute; 67, attribute; 68, identifier:prefix; 69, parenthesized_expression; 70, identifier:directories; 71, raise_statement; 72, attribute; 73, None; 74, raise_statement; 75, identifier:RuntimeError; 76, argument_list; 77, identifier:self; 78, identifier:currentdir; 79, identifier:self; 80, identifier:DEFAULTDIR; 81, concatenated_string; 82, call; 83, identifier:self; 84, identifier:DEFAULTDIR; 85, call; 86, concatenated_string; 87, string; 88, string; 89, string; 90, string; 91, identifier:RuntimeError; 92, argument_list; 93, identifier:RuntimeError; 94, argument_list; 95, string; 96, string; 97, string; 98, string_content:The current working directory of the; 99, interpolation; 100, string_content:object; 101, string_content:has not been defined manually and cannot; 102, string_content:be determined automatically:; 103, concatenated_string; 104, concatenated_string; 105, interpolation; 106, string_content:The default directory (; 107, interpolation; 108, string_content:); 109, string_content:is not among the available directories; 110, string_content:(; 111, interpolation; 112, string_content:).; 113, call; 114, string; 115, string; 116, string; 117, string; 118, string; 119, identifier:prefix; 120, attribute; 121, call; 122, attribute; 123, argument_list; 124, interpolation; 125, string_content:`; 126, interpolation; 127, string_content:`; 128, string_content:does not contain any available directories.; 129, interpolation; 130, string_content:`; 131, interpolation; 132, string_content:`; 133, string_content:does contain multiple available directories; 134, string_content:(; 135, interpolation; 136, string_content:).; 137, identifier:self; 138, identifier:DEFAULTDIR; 139, attribute; 140, argument_list; 141, identifier:objecttools; 142, identifier:classname; 143, identifier:self; 144, identifier:prefix; 145, call; 146, identifier:prefix; 147, call; 148, call; 149, identifier:objecttools; 150, identifier:enumeration; 151, identifier:directories; 152, attribute; 153, argument_list; 154, attribute; 155, argument_list; 156, attribute; 157, argument_list; 158, identifier:objecttools; 159, identifier:repr_; 160, attribute; 161, identifier:objecttools; 162, identifier:repr_; 163, attribute; 164, identifier:objecttools; 165, identifier:enumeration; 166, identifier:directories; 167, identifier:self; 168, identifier:basepath; 169, identifier:self; 170, 
identifier:basepath | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 5, 10; 8, 11; 9, 12; 9, 13; 10, 14; 12, 15; 12, 16; 13, 17; 13, 18; 14, 19; 14, 20; 15, 21; 15, 22; 17, 23; 18, 24; 18, 25; 18, 26; 18, 27; 23, 28; 23, 29; 24, 30; 24, 31; 25, 32; 26, 33; 26, 34; 27, 35; 29, 36; 29, 37; 30, 38; 30, 39; 32, 40; 33, 41; 33, 42; 34, 43; 35, 44; 35, 45; 35, 46; 35, 47; 36, 48; 36, 49; 39, 50; 40, 51; 40, 52; 41, 53; 41, 54; 43, 55; 44, 56; 45, 57; 45, 58; 46, 59; 46, 60; 47, 61; 51, 62; 51, 63; 52, 64; 52, 65; 55, 66; 55, 67; 56, 68; 56, 69; 57, 70; 58, 71; 59, 72; 59, 73; 60, 74; 61, 75; 61, 76; 66, 77; 66, 78; 67, 79; 67, 80; 69, 81; 71, 82; 72, 83; 72, 84; 74, 85; 76, 86; 81, 87; 81, 88; 81, 89; 81, 90; 82, 91; 82, 92; 85, 93; 85, 94; 86, 95; 86, 96; 86, 97; 87, 98; 88, 99; 88, 100; 89, 101; 90, 102; 92, 103; 94, 104; 95, 105; 95, 106; 95, 107; 95, 108; 96, 109; 97, 110; 97, 111; 97, 112; 99, 113; 103, 114; 103, 115; 104, 116; 104, 117; 104, 118; 105, 119; 107, 120; 111, 121; 113, 122; 113, 123; 114, 124; 114, 125; 114, 126; 114, 127; 115, 128; 116, 129; 116, 130; 116, 131; 116, 132; 117, 133; 118, 134; 118, 135; 118, 136; 120, 137; 120, 138; 121, 139; 121, 140; 122, 141; 122, 142; 123, 143; 124, 144; 126, 145; 129, 146; 131, 147; 135, 148; 139, 149; 139, 150; 140, 151; 145, 152; 145, 153; 147, 154; 147, 155; 148, 156; 148, 157; 152, 158; 152, 159; 153, 160; 154, 161; 154, 162; 155, 163; 156, 164; 156, 165; 157, 166; 160, 167; 160, 168; 163, 169; 163, 170 | def currentdir(self) -> str:
"""Name of the current working directory containing the relevant files.
To show most of the functionality of |property|
|FileManager.currentdir| (unpacking zip files on the fly is
explained in the documentation on function
(|FileManager.zip_currentdir|), we first prepare a |FileManager|
object corresponding to the |FileManager.basepath|
`projectname/basename`:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename')
... repr_(filemanager.basepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename'
At first, the base directory is empty and asking for the
current working directory results in the following error:
>>> with TestIO():
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: \
`.../projectname/basename` does not contain any available directories.
If only one directory exists, it is considered as the current
working directory automatically:
>>> with TestIO():
... os.mkdir('projectname/basename/dir1')
... filemanager.currentdir
'dir1'
|property| |FileManager.currentdir| memorises the name of the
current working directory, even if another directory is later
added to the base path:
>>> with TestIO():
... os.mkdir('projectname/basename/dir2')
... filemanager.currentdir
'dir1'
Set the value of |FileManager.currentdir| to |None| to let it
forget the memorised directory. After that, asking for the
current working directory now results in another error, as
it is not clear which directory to select:
>>> with TestIO():
... filemanager.currentdir = None
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: \
`....../projectname/basename` does contain multiple available directories \
(dir1 and dir2).
Setting |FileManager.currentdir| manually solves the problem:
>>> with TestIO():
... filemanager.currentdir = 'dir1'
... filemanager.currentdir
'dir1'
Remove the current working directory `dir1` with the `del` statement:
>>> with TestIO():
... del filemanager.currentdir
... os.path.exists('projectname/basename/dir1')
False
|FileManager| subclasses can define a default directory name.
When many directories exist and none is selected manually, the
default directory is selected automatically. The following
example shows an error message due to multiple directories
without any having the default name:
>>> with TestIO():
... os.mkdir('projectname/basename/dir1')
... filemanager.DEFAULTDIR = 'dir3'
... del filemanager.currentdir
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: The \
default directory (dir3) is not among the available directories (dir1 and dir2).
We can fix this by adding the required default directory manually:
>>> with TestIO():
... os.mkdir('projectname/basename/dir3')
... filemanager.currentdir
'dir3'
Setting the |FileManager.currentdir| to `dir4` not only overwrites
the default name, but also creates the required folder:
>>> with TestIO():
... filemanager.currentdir = 'dir4'
... filemanager.currentdir
'dir4'
>>> with TestIO():
... sorted(os.listdir('projectname/basename'))
['dir1', 'dir2', 'dir3', 'dir4']
Failed attempts to remove directories result in error messages
like the following one:
>>> import shutil
>>> from unittest.mock import patch
>>> with patch.object(shutil, 'rmtree', side_effect=AttributeError):
... with TestIO():
... del filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: While trying to delete the current working directory \
`.../projectname/basename/dir4` of the FileManager object, the following \
error occurred: ...
Then, the current working directory still exists and is remembered
by |FileManager.currentdir|:
>>> with TestIO():
... filemanager.currentdir
'dir4'
>>> with TestIO():
... sorted(os.listdir('projectname/basename'))
['dir1', 'dir2', 'dir3', 'dir4']
"""
if self._currentdir is None:
directories = self.availabledirs.folders
if len(directories) == 1:
self.currentdir = directories[0]
elif self.DEFAULTDIR in directories:
self.currentdir = self.DEFAULTDIR
else:
prefix = (f'The current working directory of the '
f'{objecttools.classname(self)} object '
f'has not been defined manually and cannot '
f'be determined automatically:')
if not directories:
raise RuntimeError(
f'{prefix} `{objecttools.repr_(self.basepath)}` '
f'does not contain any available directories.')
if self.DEFAULTDIR is None:
raise RuntimeError(
f'{prefix} `{objecttools.repr_(self.basepath)}` '
f'does contain multiple available directories '
f'({objecttools.enumeration(directories)}).')
raise RuntimeError(
f'{prefix} The default directory ({self.DEFAULTDIR}) '
f'is not among the available directories '
f'({objecttools.enumeration(directories)}).')
return self._currentdir |
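The selection rules coded above can be condensed into a short standalone sketch. The helper name `select_currentdir` and the plain-list argument are hypothetical and serve illustration only; the actual behaviour is defined by the property itself:

def select_currentdir(directories, defaultdir=None):
    # A single available directory is selected automatically.
    if len(directories) == 1:
        return directories[0]
    # Otherwise a matching default directory decides.
    if defaultdir is not None and defaultdir in directories:
        return defaultdir
    # In all remaining cases no automatic selection is possible.
    raise RuntimeError(
        'cannot determine the working directory automatically: '
        f'{", ".join(sorted(directories)) or "no available directories"}')

select_currentdir(['dir1'])                               # 'dir1'
select_currentdir(['dir1', 'dir2'], defaultdir='dir2')    # 'dir2'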
0, module; 1, function_definition; 2, function_name:zip_currentdir; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, None; 8, expression_statement; 9, with_statement; 10, delete_statement; 11, comment:"""Pack the current working directory in a `zip` file.
|FileManager| subclasses allow for manual packing and automatic
unpacking of working directories. The only supported format is `zip`.
To avoid possible inconsistencies, origin directories and zip
files are removed after packing or unpacking, respectively.
As an example scenario, we prepare a |FileManager| object with
the current working directory `folder` containing the files
`file1.txt` and `file2.txt`:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> basepath = 'projectname/basename'
>>> with TestIO():
... os.makedirs(basepath)
... filemanager.currentdir = 'folder'
... open(f'{basepath}/folder/file1.txt', 'w').close()
... open(f'{basepath}/folder/file2.txt', 'w').close()
... filemanager.filenames
['file1.txt', 'file2.txt']
The directories existing under the base path are identical
with the ones returned by property |FileManager.availabledirs|:
>>> with TestIO():
... sorted(os.listdir(basepath))
... filemanager.availabledirs # doctest: +ELLIPSIS
['folder']
Folder2Path(folder=.../projectname/basename/folder)
After packing the current working directory manually, it is
still counted as an available directory:
>>> with TestIO():
... filemanager.zip_currentdir()
... sorted(os.listdir(basepath))
... filemanager.availabledirs # doctest: +ELLIPSIS
['folder.zip']
Folder2Path(folder=.../projectname/basename/folder.zip)
Instead of the complete directory, only the contained files
are packed:
>>> from zipfile import ZipFile
>>> with TestIO():
... with ZipFile('projectname/basename/folder.zip', 'r') as zp:
... sorted(zp.namelist())
['file1.txt', 'file2.txt']
The zip file is unpacked again as soon as `folder` becomes
the current working directory:
>>> with TestIO():
... filemanager.currentdir = 'folder'
... sorted(os.listdir(basepath))
... filemanager.availabledirs
... filemanager.filenames # doctest: +ELLIPSIS
['folder']
Folder2Path(folder=.../projectname/basename/folder)
['file1.txt', 'file2.txt']
"""; 12, with_clause; 13, block; 14, attribute; 15, with_item; 16, for_statement; 17, identifier:self; 18, identifier:currentdir; 19, as_pattern; 20, pattern_list; 21, call; 22, block; 23, call; 24, as_pattern_target; 25, identifier:filepath; 26, identifier:filename; 27, identifier:zip; 28, argument_list; 29, expression_statement; 30, attribute; 31, argument_list; 32, identifier:zipfile_; 33, attribute; 34, attribute; 35, call; 36, identifier:zipfile; 37, identifier:ZipFile; 38, string; 39, string; 40, identifier:self; 41, identifier:filepaths; 42, identifier:self; 43, identifier:filenames; 44, attribute; 45, argument_list; 46, interpolation; 47, string_content:.zip; 48, string_content:w; 49, identifier:zipfile_; 50, identifier:write; 51, keyword_argument; 52, keyword_argument; 53, attribute; 54, identifier:filename; 55, identifier:filepath; 56, identifier:arcname; 57, identifier:filename; 58, identifier:self; 59, identifier:currentpath | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 5, 10; 8, 11; 9, 12; 9, 13; 10, 14; 12, 15; 13, 16; 14, 17; 14, 18; 15, 19; 16, 20; 16, 21; 16, 22; 19, 23; 19, 24; 20, 25; 20, 26; 21, 27; 21, 28; 22, 29; 23, 30; 23, 31; 24, 32; 28, 33; 28, 34; 29, 35; 30, 36; 30, 37; 31, 38; 31, 39; 33, 40; 33, 41; 34, 42; 34, 43; 35, 44; 35, 45; 38, 46; 38, 47; 39, 48; 44, 49; 44, 50; 45, 51; 45, 52; 46, 53; 51, 54; 51, 55; 52, 56; 52, 57; 53, 58; 53, 59 | def zip_currentdir(self) -> None:
"""Pack the current working directory in a `zip` file.
|FileManager| subclasses allow for manual packing and automatic
unpacking of working directories. The only supported format is `zip`.
To avoid possible inconsistencies, origin directories and zip
files are removed after packing or unpacking, respectively.
As an example scenario, we prepare a |FileManager| object with
the current working directory `folder` containing the files
`file1.txt` and `file2.txt`:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> basepath = 'projectname/basename'
>>> with TestIO():
... os.makedirs(basepath)
... filemanager.currentdir = 'folder'
... open(f'{basepath}/folder/file1.txt', 'w').close()
... open(f'{basepath}/folder/file2.txt', 'w').close()
... filemanager.filenames
['file1.txt', 'file2.txt']
The directories existing under the base path are identical
with the ones returned by property |FileManager.availabledirs|:
>>> with TestIO():
... sorted(os.listdir(basepath))
... filemanager.availabledirs # doctest: +ELLIPSIS
['folder']
Folder2Path(folder=.../projectname/basename/folder)
After packing the current working directory manually, it is
still counted as an available directory:
>>> with TestIO():
... filemanager.zip_currentdir()
... sorted(os.listdir(basepath))
... filemanager.availabledirs # doctest: +ELLIPSIS
['folder.zip']
Folder2Path(folder=.../projectname/basename/folder.zip)
Instead of the complete directory, only the contained files
are packed:
>>> from zipfile import ZipFile
>>> with TestIO():
... with ZipFile('projectname/basename/folder.zip', 'r') as zp:
... sorted(zp.namelist())
['file1.txt', 'file2.txt']
The zip file is unpacked again as soon as `folder` becomes
the current working directory:
>>> with TestIO():
... filemanager.currentdir = 'folder'
... sorted(os.listdir(basepath))
... filemanager.availabledirs
... filemanager.filenames # doctest: +ELLIPSIS
['folder']
Folder2Path(folder=.../projectname/basename/folder)
['file1.txt', 'file2.txt']
"""
with zipfile.ZipFile(f'{self.currentpath}.zip', 'w') as zipfile_:
for filepath, filename in zip(self.filepaths, self.filenames):
zipfile_.write(filename=filepath, arcname=filename)
del self.currentdir |
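The packing pattern itself is plain standard-library code. A minimal standalone sketch, assuming the given directory contains regular files only (`pack_directory` is a hypothetical name, not part of hydpy):

import os
import zipfile

def pack_directory(dirpath):
    # Store each file under its bare name, mirroring the `arcname` usage above.
    with zipfile.ZipFile(f'{dirpath}.zip', 'w') as zipfile_:
        for filename in sorted(os.listdir(dirpath)):
            zipfile_.write(os.path.join(dirpath, filename), arcname=filename)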
0, module; 1, function_definition; 2, function_name:keywords; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, generic_type; 8, expression_statement; 9, return_statement; 10, identifier:Set; 11, type_parameter; 12, comment:"""A set of all keywords of all handled devices.
In addition to attribute access via device names, |Nodes| and
|Elements| objects allow for attribute access via keywords,
enabling an efficient search for certain groups of devices.
Let us use the example from above, where the nodes `na` and `nb`
have no keywords, but each of the other three nodes belongs both
to either `group_a` or `group_b` and to either `group_1` or `group_2`:
>>> from hydpy import Node, Nodes
>>> nodes = Nodes('na',
... Node('nb', variable='W'),
... Node('nc', keywords=('group_a', 'group_1')),
... Node('nd', keywords=('group_a', 'group_2')),
... Node('ne', keywords=('group_b', 'group_1')))
>>> nodes
Nodes("na", "nb", "nc", "nd", "ne")
>>> sorted(nodes.keywords)
['group_1', 'group_2', 'group_a', 'group_b']
If you are interested in inspecting all devices belonging to
`group_1`, select them via this keyword:
>>> subgroup = nodes.group_1
>>> subgroup
Nodes("nc", "ne")
You can further restrict the search by also selecting the devices
belonging to `group_b`, which holds only for node "ne" in the given
example:
>>> subsubgroup = subgroup.group_b
>>> subsubgroup
Node("ne", variable="Q",
keywords=["group_1", "group_b"])
Note that the keywords already used for building a device subgroup
are not informative anymore (as they hold for each device) and are
thus not shown anymore:
>>> sorted(subgroup.keywords)
['group_a', 'group_b']
The latter might be confusing if you intend to work with a device
subgroup for a longer time. After copying the subgroup, all
keywords of the contained devices are available again:
>>> from copy import copy
>>> newgroup = copy(subgroup)
>>> sorted(newgroup.keywords)
['group_1', 'group_a', 'group_b']
"""; 13, call; 14, type; 15, identifier:set; 16, generator_expression; 17, identifier:str; 18, identifier:keyword; 19, for_in_clause; 20, for_in_clause; 21, if_clause; 22, identifier:device; 23, identifier:self; 24, identifier:keyword; 25, attribute; 26, comparison_operator:keyword not in self._shadowed_keywords; 27, identifier:device; 28, identifier:keywords; 29, identifier:keyword; 30, attribute; 31, identifier:self; 32, identifier:_shadowed_keywords | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 7, 10; 7, 11; 8, 12; 9, 13; 11, 14; 13, 15; 13, 16; 14, 17; 16, 18; 16, 19; 16, 20; 16, 21; 19, 22; 19, 23; 20, 24; 20, 25; 21, 26; 25, 27; 25, 28; 26, 29; 26, 30; 30, 31; 30, 32 | def keywords(self) -> Set[str]:
"""A set of all keywords of all handled devices.
In addition to attribute access via device names, |Nodes| and
|Elements| objects allow for attribute access via keywords,
enabling an efficient search for certain groups of devices.
Let us use the example from above, where the nodes `na` and `nb`
have no keywords, but each of the other three nodes belongs both
to either `group_a` or `group_b` and to either `group_1` or `group_2`:
>>> from hydpy import Node, Nodes
>>> nodes = Nodes('na',
... Node('nb', variable='W'),
... Node('nc', keywords=('group_a', 'group_1')),
... Node('nd', keywords=('group_a', 'group_2')),
... Node('ne', keywords=('group_b', 'group_1')))
>>> nodes
Nodes("na", "nb", "nc", "nd", "ne")
>>> sorted(nodes.keywords)
['group_1', 'group_2', 'group_a', 'group_b']
If you are interested in inspecting all devices belonging to
`group_1`, select them via this keyword:
>>> subgroup = nodes.group_1
>>> subgroup
Nodes("nc", "ne")
You can further restrict the search by also selecting the devices
belonging to `group_b`, which holds only for node "ne" in the given
example:
>>> subsubgroup = subgroup.group_b
>>> subsubgroup
Node("ne", variable="Q",
keywords=["group_1", "group_b"])
Note that the keywords already used for building a device subgroup
are not informative anymore (as they hold for each device) and are
thus not shown anymore:
>>> sorted(subgroup.keywords)
['group_a', 'group_b']
The latter might be confusing if you intend to work with a device
subgroup for a longer time. After copying the subgroup, all
keywords of the contained devices are available again:
>>> from copy import copy
>>> newgroup = copy(subgroup)
>>> sorted(newgroup.keywords)
['group_1', 'group_a', 'group_b']
"""
return set(keyword for device in self
for keyword in device.keywords if
keyword not in self._shadowed_keywords) |
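The exclusion of already used ("shadowed") keywords boils down to the set comprehension shown above. The following sketch restates the idea with two hypothetical stand-in classes instead of the real |Nodes| and |Node| machinery:

class Device:
    def __init__(self, name, keywords=()):
        self.name = name
        self.keywords = set(keywords)

class Group:
    def __init__(self, devices, shadowed=()):
        self.devices = list(devices)
        self._shadowed_keywords = set(shadowed)

    @property
    def keywords(self):
        return set(keyword for device in self.devices
                   for keyword in device.keywords
                   if keyword not in self._shadowed_keywords)

group = Group([Device('nc', ('group_a', 'group_1')),
               Device('ne', ('group_b', 'group_1'))],
              shadowed=('group_1',))
sorted(group.keywords)   # ['group_a', 'group_b']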
0, module; 1, function_definition; 2, function_name:variables; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, generic_type; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, identifier:Set; 13, type_parameter; 14, comment:"""A set of all different |Node.variable| values of the |Node|
objects directly connected to the actual |Element| object.
Suppose there is an element connected to five nodes, which (partly)
represent different variables:
>>> from hydpy import Element, Node
>>> element = Element('Test',
... inlets=(Node('N1', 'X'), Node('N2', 'Y1')),
... outlets=(Node('N3', 'X'), Node('N4', 'Y2')),
... receivers=(Node('N5', 'X'), Node('N6', 'Y3')),
... senders=(Node('N7', 'X'), Node('N8', 'Y4')))
Property |Element.variables| puts all the different variables of
these nodes together:
>>> sorted(element.variables)
['X', 'Y1', 'Y2', 'Y3', 'Y4']
"""; 15, assignment; 16, identifier:connection; 17, attribute; 18, block; 19, identifier:variables; 20, type; 21, identifier:variables; 22, type; 23, call; 24, identifier:self; 25, identifier:__connections; 26, expression_statement; 27, identifier:str; 28, generic_type; 29, identifier:set; 30, argument_list; 31, call; 32, identifier:Set; 33, type_parameter; 34, attribute; 35, argument_list; 36, type; 37, identifier:variables; 38, identifier:update; 39, attribute; 40, identifier:str; 41, identifier:connection; 42, identifier:variables | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 5, 10; 5, 11; 7, 12; 7, 13; 8, 14; 9, 15; 10, 16; 10, 17; 10, 18; 11, 19; 13, 20; 15, 21; 15, 22; 15, 23; 17, 24; 17, 25; 18, 26; 20, 27; 22, 28; 23, 29; 23, 30; 26, 31; 28, 32; 28, 33; 31, 34; 31, 35; 33, 36; 34, 37; 34, 38; 35, 39; 36, 40; 39, 41; 39, 42 | def variables(self) -> Set[str]:
"""A set of all different |Node.variable| values of the |Node|
objects directly connected to the actual |Element| object.
Suppose there is an element connected to five nodes, which (partly)
represent different variables:
>>> from hydpy import Element, Node
>>> element = Element('Test',
... inlets=(Node('N1', 'X'), Node('N2', 'Y1')),
... outlets=(Node('N3', 'X'), Node('N4', 'Y2')),
... receivers=(Node('N5', 'X'), Node('N6', 'Y3')),
... senders=(Node('N7', 'X'), Node('N8', 'Y4')))
Property |Element.variables| puts all the different variables of
these nodes together:
>>> sorted(element.variables)
['X', 'Y1', 'Y2', 'Y3', 'Y4']
"""
variables: Set[str] = set()
for connection in self.__connections:
variables.update(connection.variables)
return variables |
0, module; 1, function_definition; 2, function_name:model2subs2seqs; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, generic_type; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, identifier:Dict; 13, type_parameter; 14, comment:"""A nested |collections.defaultdict| containing the model specific
information provided by the XML `sequences` element.
>>> from hydpy.auxs.xmltools import XMLInterface
>>> from hydpy import data
>>> interface = XMLInterface('single_run.xml', data.get_path('LahnH'))
>>> series_io = interface.series_io
>>> model2subs2seqs = series_io.writers[2].model2subs2seqs
>>> for model, subs2seqs in sorted(model2subs2seqs.items()):
... for subs, seq in sorted(subs2seqs.items()):
... print(model, subs, seq)
hland_v1 fluxes ['pc', 'tf']
hland_v1 states ['sm']
hstream_v1 states ['qjoints']
"""; 15, assignment; 16, identifier:model; 17, call; 18, block; 19, identifier:model2subs2seqs; 20, type; 21, type; 22, identifier:model2subs2seqs; 23, call; 24, attribute; 25, argument_list; 26, expression_statement; 27, if_statement; 28, for_statement; 29, identifier:str; 30, generic_type; 31, attribute; 32, argument_list; 33, identifier:self; 34, identifier:find; 35, string; 36, assignment; 37, comparison_operator:model_name == 'node'; 38, block; 39, identifier:group; 40, identifier:model; 41, block; 42, identifier:Dict; 43, type_parameter; 44, identifier:collections; 45, identifier:defaultdict; 46, lambda; 47, string_content:sequences; 48, identifier:model_name; 49, call; 50, identifier:model_name; 51, string; 52, continue_statement; 53, expression_statement; 54, for_statement; 55, type; 56, type; 57, call; 58, identifier:strip; 59, argument_list; 60, string_content:node; 61, assignment; 62, identifier:sequence; 63, identifier:group; 64, block; 65, identifier:str; 66, generic_type; 67, attribute; 68, argument_list; 69, attribute; 70, identifier:group_name; 71, call; 72, expression_statement; 73, expression_statement; 74, identifier:List; 75, type_parameter; 76, identifier:collections; 77, identifier:defaultdict; 78, identifier:list; 79, identifier:model; 80, identifier:tag; 81, identifier:strip; 82, argument_list; 83, assignment; 84, call; 85, type; 86, attribute; 87, identifier:seq_name; 88, call; 89, attribute; 90, argument_list; 91, identifier:str; 92, identifier:group; 93, identifier:tag; 94, identifier:strip; 95, argument_list; 96, subscript; 97, identifier:append; 98, identifier:seq_name; 99, attribute; 100, subscript; 101, identifier:group_name; 102, identifier:sequence; 103, identifier:tag; 104, identifier:model2subs2seqs; 105, identifier:model_name | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 5, 10; 5, 11; 7, 12; 7, 13; 8, 14; 9, 15; 10, 16; 10, 17; 10, 18; 11, 19; 13, 20; 13, 21; 15, 22; 15, 23; 17, 24; 17, 25; 18, 26; 18, 27; 18, 28; 20, 29; 21, 30; 23, 31; 23, 32; 24, 33; 24, 34; 25, 35; 26, 36; 27, 37; 27, 38; 28, 39; 28, 40; 28, 41; 30, 42; 30, 43; 31, 44; 31, 45; 32, 46; 35, 47; 36, 48; 36, 49; 37, 50; 37, 51; 38, 52; 41, 53; 41, 54; 43, 55; 43, 56; 46, 57; 49, 58; 49, 59; 51, 60; 53, 61; 54, 62; 54, 63; 54, 64; 55, 65; 56, 66; 57, 67; 57, 68; 59, 69; 61, 70; 61, 71; 64, 72; 64, 73; 66, 74; 66, 75; 67, 76; 67, 77; 68, 78; 69, 79; 69, 80; 71, 81; 71, 82; 72, 83; 73, 84; 75, 85; 82, 86; 83, 87; 83, 88; 84, 89; 84, 90; 85, 91; 86, 92; 86, 93; 88, 94; 88, 95; 89, 96; 89, 97; 90, 98; 95, 99; 96, 100; 96, 101; 99, 102; 99, 103; 100, 104; 100, 105 | def model2subs2seqs(self) -> Dict[str, Dict[str, List[str]]]:
"""A nested |collections.defaultdict| containing the model specific
information provided by the XML `sequences` element.
>>> from hydpy.auxs.xmltools import XMLInterface
>>> from hydpy import data
>>> interface = XMLInterface('single_run.xml', data.get_path('LahnH'))
>>> series_io = interface.series_io
>>> model2subs2seqs = series_io.writers[2].model2subs2seqs
>>> for model, subs2seqs in sorted(model2subs2seqs.items()):
... for subs, seq in sorted(subs2seqs.items()):
... print(model, subs, seq)
hland_v1 fluxes ['pc', 'tf']
hland_v1 states ['sm']
hstream_v1 states ['qjoints']
"""
model2subs2seqs = collections.defaultdict(
lambda: collections.defaultdict(list))
for model in self.find('sequences'):
model_name = strip(model.tag)
if model_name == 'node':
continue
for group in model:
group_name = strip(group.tag)
for sequence in group:
seq_name = strip(sequence.tag)
model2subs2seqs[model_name][group_name].append(seq_name)
return model2subs2seqs |
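The two-level grouping relies on nothing more than nested |collections.defaultdict| objects. A minimal sketch of the pattern, with made-up entries that mirror the doctest output above:

import collections

model2subs2seqs = collections.defaultdict(
    lambda: collections.defaultdict(list))
# Missing keys are created on first access, so appending never fails:
model2subs2seqs['hland_v1']['fluxes'].append('pc')
model2subs2seqs['hland_v1']['fluxes'].append('tf')
model2subs2seqs['hland_v1']['states'].append('sm')
sorted(model2subs2seqs['hland_v1'])   # ['fluxes', 'states']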
0, module; 1, function_definition; 2, function_name:subs2seqs; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, generic_type; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, if_statement; 12, return_statement; 13, identifier:Dict; 14, type_parameter; 15, comment:"""A |collections.defaultdict| containing the node-specific
information provided by the XML `sequences` element.
>>> from hydpy.auxs.xmltools import XMLInterface
>>> from hydpy import data
>>> interface = XMLInterface('single_run.xml', data.get_path('LahnH'))
>>> series_io = interface.series_io
>>> subs2seqs = series_io.writers[2].subs2seqs
>>> for subs, seq in sorted(subs2seqs.items()):
... print(subs, seq)
node ['sim', 'obs']
"""; 16, assignment; 17, assignment; 18, comparison_operator:nodes is not None; 19, block; 20, identifier:subs2seqs; 21, type; 22, type; 23, identifier:subs2seqs; 24, call; 25, identifier:nodes; 26, call; 27, identifier:nodes; 28, None; 29, for_statement; 30, identifier:str; 31, generic_type; 32, attribute; 33, argument_list; 34, identifier:find; 35, argument_list; 36, identifier:seq; 37, identifier:nodes; 38, block; 39, identifier:List; 40, type_parameter; 41, identifier:collections; 42, identifier:defaultdict; 43, identifier:list; 44, call; 45, string; 46, expression_statement; 47, type; 48, attribute; 49, argument_list; 50, string_content:node; 51, call; 52, identifier:str; 53, identifier:self; 54, identifier:find; 55, string; 56, attribute; 57, argument_list; 58, string_content:sequences; 59, subscript; 60, identifier:append; 61, call; 62, identifier:subs2seqs; 63, string; 64, identifier:strip; 65, argument_list; 66, string_content:node; 67, attribute; 68, identifier:seq; 69, identifier:tag | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 5, 10; 5, 11; 5, 12; 7, 13; 7, 14; 8, 15; 9, 16; 10, 17; 11, 18; 11, 19; 12, 20; 14, 21; 14, 22; 16, 23; 16, 24; 17, 25; 17, 26; 18, 27; 18, 28; 19, 29; 21, 30; 22, 31; 24, 32; 24, 33; 26, 34; 26, 35; 29, 36; 29, 37; 29, 38; 31, 39; 31, 40; 32, 41; 32, 42; 33, 43; 35, 44; 35, 45; 38, 46; 40, 47; 44, 48; 44, 49; 45, 50; 46, 51; 47, 52; 48, 53; 48, 54; 49, 55; 51, 56; 51, 57; 55, 58; 56, 59; 56, 60; 57, 61; 59, 62; 59, 63; 61, 64; 61, 65; 63, 66; 65, 67; 67, 68; 67, 69 | def subs2seqs(self) -> Dict[str, List[str]]:
"""A |collections.defaultdict| containing the node-specific
information provided by the XML `sequences` element.
>>> from hydpy.auxs.xmltools import XMLInterface
>>> from hydpy import data
>>> interface = XMLInterface('single_run.xml', data.get_path('LahnH'))
>>> series_io = interface.series_io
>>> subs2seqs = series_io.writers[2].subs2seqs
>>> for subs, seq in sorted(subs2seqs.items()):
... print(subs, seq)
node ['sim', 'obs']
"""
subs2seqs = collections.defaultdict(list)
nodes = find(self.find('sequences'), 'node')
if nodes is not None:
for seq in nodes:
subs2seqs['node'].append(strip(seq.tag))
return subs2seqs |
0, module; 1, function_definition; 2, function_name:get_modelnames; 3, parameters; 4, type; 5, block; 6, generic_type; 7, expression_statement; 8, return_statement; 9, identifier:List; 10, type_parameter; 11, comment:"""Return a sorted |list| containing all application model names.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_modelnames()) # doctest: +ELLIPSIS
[...'dam_v001', 'dam_v002', 'dam_v003', 'dam_v004', 'dam_v005',...]
"""; 12, call; 13, type; 14, identifier:sorted; 15, generator_expression; 16, identifier:str; 17, call; 18, for_in_clause; 19, if_clause; 20, identifier:str; 21, argument_list; 22, identifier:fn; 23, call; 24, parenthesized_expression; 25, subscript; 26, attribute; 27, argument_list; 28, boolean_operator; 29, call; 30, integer:0; 31, identifier:os; 32, identifier:listdir; 33, subscript; 34, call; 35, parenthesized_expression; 36, attribute; 37, argument_list; 38, attribute; 39, integer:0; 40, attribute; 41, argument_list; 42, comparison_operator:fn != '__init__.py'; 43, identifier:fn; 44, identifier:split; 45, string; 46, identifier:models; 47, identifier:__path__; 48, identifier:fn; 49, identifier:endswith; 50, string; 51, identifier:fn; 52, string; 53, string_content:.; 54, string_content:.py; 55, string_content:__init__.py | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 4, 6; 5, 7; 5, 8; 6, 9; 6, 10; 7, 11; 8, 12; 10, 13; 12, 14; 12, 15; 13, 16; 15, 17; 15, 18; 15, 19; 17, 20; 17, 21; 18, 22; 18, 23; 19, 24; 21, 25; 23, 26; 23, 27; 24, 28; 25, 29; 25, 30; 26, 31; 26, 32; 27, 33; 28, 34; 28, 35; 29, 36; 29, 37; 33, 38; 33, 39; 34, 40; 34, 41; 35, 42; 36, 43; 36, 44; 37, 45; 38, 46; 38, 47; 40, 48; 40, 49; 41, 50; 42, 51; 42, 52; 45, 53; 50, 54; 52, 55 | def get_modelnames() -> List[str]:
"""Return a sorted |list| containing all application model names.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_modelnames()) # doctest: +ELLIPSIS
[...'dam_v001', 'dam_v002', 'dam_v003', 'dam_v004', 'dam_v005',...]
"""
return sorted(str(fn.split('.')[0])
for fn in os.listdir(models.__path__[0])
if (fn.endswith('.py') and (fn != '__init__.py'))) |
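The same filename filtering works for any package directory. A small sketch, assuming `packagepath` points to a folder of Python modules (`list_module_names` is a hypothetical helper, not part of hydpy):

import os

def list_module_names(packagepath):
    # Keep the stem of every `*.py` file except `__init__.py`.
    return sorted(fn.split('.')[0] for fn in os.listdir(packagepath)
                  if fn.endswith('.py') and fn != '__init__.py')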
0, module; 1, function_definition; 2, function_name:toys; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, generic_type; 8, expression_statement; 9, return_statement; 10, identifier:Tuple; 11, type_parameter; 12, comment:"""A sorted |tuple| of all contained |TOY| objects."""; 13, call; 14, type; 15, type; 16, identifier:tuple; 17, generator_expression; 18, attribute; 19, ellipsis:...; 20, identifier:toy; 21, for_in_clause; 22, identifier:timetools; 23, identifier:TOY; 24, tuple_pattern; 25, identifier:self; 26, identifier:toy; 27, identifier:_ | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 7, 10; 7, 11; 8, 12; 9, 13; 11, 14; 11, 15; 13, 16; 13, 17; 14, 18; 15, 19; 17, 20; 17, 21; 18, 22; 18, 23; 21, 24; 21, 25; 24, 26; 24, 27 | def toys(self) -> Tuple[timetools.TOY, ...]:
"""A sorted |tuple| of all contained |TOY| objects."""
return tuple(toy for (toy, _) in self) |
0, module; 1, function_definition; 2, function_name:collect_variables; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, identifier:selections; 8, None; 9, expression_statement; 10, expression_statement; 11, comment:"""Apply method |ExchangeItem.insert_variables| to collect the
relevant target variables handled by the devices of the given
|Selections| object.
We prepare the `LahnH` example project to be able to use its
|Selections| object:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
We change the type of a specific application model to the type
of its base model for reasons explained later:
>>> from hydpy.models.hland import Model
>>> hp.elements.land_lahn_3.model.__class__ = Model
We prepare a |SetItem| as an example, handling all |hland_states.Ic|
sequences corresponding to any application models derived from |hland|:
>>> from hydpy import SetItem
>>> item = SetItem('ic', 'hland', 'states.ic', 0)
>>> item.targetspecs
ExchangeSpecification('hland', 'states.ic')
Applying method |ExchangeItem.collect_variables| connects the |SetItem|
object with all four relevant |hland_states.Ic| objects:
>>> item.collect_variables(pub.selections)
>>> land_dill = hp.elements.land_dill
>>> sequence = land_dill.model.sequences.states.ic
>>> item.device2target[land_dill] is sequence
True
>>> for element in sorted(item.device2target, key=lambda x: x.name):
... print(element)
land_dill
land_lahn_1
land_lahn_2
land_lahn_3
Asking for |hland_states.Ic| objects corresponding to application
model |hland_v1| only results in skipping the |Element| `land_lahn_3`
(handling the |hland| base model due to the hack above):
>>> item = SetItem('ic', 'hland_v1', 'states.ic', 0)
>>> item.collect_variables(pub.selections)
>>> for element in sorted(item.device2target, key=lambda x: x.name):
... print(element)
land_dill
land_lahn_1
land_lahn_2
Selecting a series of a variable instead of the variable itself
only affects the `targetspecs` attribute:
>>> item = SetItem('t', 'hland_v1', 'inputs.t.series', 0)
>>> item.collect_variables(pub.selections)
>>> item.targetspecs
ExchangeSpecification('hland_v1', 'inputs.t.series')
>>> sequence = land_dill.model.sequences.inputs.t
>>> item.device2target[land_dill] is sequence
True
It is possible to address both the sequences of |Node| objects and
their time series via the arguments "node" and "nodes":
>>> item = SetItem('sim', 'node', 'sim', 0)
>>> item.collect_variables(pub.selections)
>>> dill = hp.nodes.dill
>>> item.targetspecs
ExchangeSpecification('node', 'sim')
>>> item.device2target[dill] is dill.sequences.sim
True
>>> for node in sorted(item.device2target, key=lambda x: x.name):
... print(node)
dill
lahn_1
lahn_2
lahn_3
>>> item = SetItem('sim', 'nodes', 'sim.series', 0)
>>> item.collect_variables(pub.selections)
>>> item.targetspecs
ExchangeSpecification('nodes', 'sim.series')
>>> for node in sorted(item.device2target, key=lambda x: x.name):
... print(node)
dill
lahn_1
lahn_2
lahn_3
"""; 12, call; 13, attribute; 14, argument_list; 15, identifier:self; 16, identifier:insert_variables; 17, attribute; 18, attribute; 19, identifier:selections; 20, identifier:self; 21, identifier:device2target; 22, identifier:self; 23, identifier:targetspecs | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 4, 8; 5, 9; 5, 10; 9, 11; 10, 12; 12, 13; 12, 14; 13, 15; 13, 16; 14, 17; 14, 18; 14, 19; 17, 20; 17, 21; 18, 22; 18, 23 | def collect_variables(self, selections) -> None:
"""Apply method |ExchangeItem.insert_variables| to collect the
relevant target variables handled by the devices of the given
|Selections| object.
We prepare the `LahnH` example project to be able to use its
|Selections| object:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
We change the type of a specific application model to the type
of its base model for reasons explained later:
>>> from hydpy.models.hland import Model
>>> hp.elements.land_lahn_3.model.__class__ = Model
We prepare a |SetItem| as an example, handling all |hland_states.Ic|
sequences corresponding to any application models derived from |hland|:
>>> from hydpy import SetItem
>>> item = SetItem('ic', 'hland', 'states.ic', 0)
>>> item.targetspecs
ExchangeSpecification('hland', 'states.ic')
Applying method |ExchangeItem.collect_variables| connects the |SetItem|
object with all four relevant |hland_states.Ic| objects:
>>> item.collect_variables(pub.selections)
>>> land_dill = hp.elements.land_dill
>>> sequence = land_dill.model.sequences.states.ic
>>> item.device2target[land_dill] is sequence
True
>>> for element in sorted(item.device2target, key=lambda x: x.name):
... print(element)
land_dill
land_lahn_1
land_lahn_2
land_lahn_3
Asking for |hland_states.Ic| objects corresponding to application
model |hland_v1| only results in skipping the |Element| `land_lahn_3`
(handling the |hland| base model due to the hack above):
>>> item = SetItem('ic', 'hland_v1', 'states.ic', 0)
>>> item.collect_variables(pub.selections)
>>> for element in sorted(item.device2target, key=lambda x: x.name):
... print(element)
land_dill
land_lahn_1
land_lahn_2
Selecting a series of a variable instead of the variable itself
only affects the `targetspecs` attribute:
>>> item = SetItem('t', 'hland_v1', 'inputs.t.series', 0)
>>> item.collect_variables(pub.selections)
>>> item.targetspecs
ExchangeSpecification('hland_v1', 'inputs.t.series')
>>> sequence = land_dill.model.sequences.inputs.t
>>> item.device2target[land_dill] is sequence
True
It is possible to address both the sequences of |Node| objects and
their time series via the arguments "node" and "nodes":
>>> item = SetItem('sim', 'node', 'sim', 0)
>>> item.collect_variables(pub.selections)
>>> dill = hp.nodes.dill
>>> item.targetspecs
ExchangeSpecification('node', 'sim')
>>> item.device2target[dill] is dill.sequences.sim
True
>>> for node in sorted(item.device2target, key=lambda x: x.name):
... print(node)
dill
lahn_1
lahn_2
lahn_3
>>> item = SetItem('sim', 'nodes', 'sim.series', 0)
>>> item.collect_variables(pub.selections)
>>> item.targetspecs
ExchangeSpecification('nodes', 'sim.series')
>>> for node in sorted(item.device2target, key=lambda x: x.name):
... print(node)
dill
lahn_1
lahn_2
lahn_3
"""
self.insert_variables(self.device2target, self.targetspecs, selections) |
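Resolving a dotted target specification such as `states.ic` against a model essentially means walking a `getattr` chain over its sequence groups. The following sketch only illustrates that idea with a hypothetical helper; the actual resolution is performed by |ExchangeItem.insert_variables|:

def resolve_specification(model, specification):
    # For example, 'states.ic' resolves to `model.sequences.states.ic`.
    target = model.sequences
    for name in specification.split('.'):
        target = getattr(target, name)
    return target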
0, module; 1, function_definition; 2, function_name:collect_variables; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, identifier:selections; 8, None; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, comment:"""Apply method |ChangeItem.collect_variables| of the base class
|ChangeItem| and also apply method |ExchangeItem.insert_variables|
of class |ExchangeItem| to collect the relevant base variables
handled by the devices of the given |Selections| object.
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> from hydpy import AddItem
>>> item = AddItem(
... 'alpha', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
>>> item.collect_variables(pub.selections)
>>> land_dill = hp.elements.land_dill
>>> control = land_dill.model.parameters.control
>>> item.device2target[land_dill] is control.sfcf
True
>>> item.device2base[land_dill] is control.rfcf
True
>>> for device in sorted(item.device2base, key=lambda x: x.name):
... print(device)
land_dill
land_lahn_1
land_lahn_2
land_lahn_3
"""; 13, call; 14, call; 15, attribute; 16, argument_list; 17, attribute; 18, argument_list; 19, call; 20, identifier:collect_variables; 21, identifier:selections; 22, identifier:self; 23, identifier:insert_variables; 24, attribute; 25, attribute; 26, identifier:selections; 27, identifier:super; 28, argument_list; 29, identifier:self; 30, identifier:device2base; 31, identifier:self; 32, identifier:basespecs | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 4, 8; 5, 9; 5, 10; 5, 11; 9, 12; 10, 13; 11, 14; 13, 15; 13, 16; 14, 17; 14, 18; 15, 19; 15, 20; 16, 21; 17, 22; 17, 23; 18, 24; 18, 25; 18, 26; 19, 27; 19, 28; 24, 29; 24, 30; 25, 31; 25, 32 | def collect_variables(self, selections) -> None:
"""Apply method |ChangeItem.collect_variables| of the base class
|ChangeItem| and also apply method |ExchangeItem.insert_variables|
of class |ExchangeItem| to collect the relevant base variables
handled by the devices of the given |Selections| object.
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> from hydpy import AddItem
>>> item = AddItem(
... 'alpha', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
>>> item.collect_variables(pub.selections)
>>> land_dill = hp.elements.land_dill
>>> control = land_dill.model.parameters.control
>>> item.device2target[land_dill] is control.sfcf
True
>>> item.device2base[land_dill] is control.rfcf
True
>>> for device in sorted(item.device2base, key=lambda x: x.name):
... print(device)
land_dill
land_lahn_1
land_lahn_2
land_lahn_3
"""
super().collect_variables(selections)
self.insert_variables(self.device2base, self.basespecs, selections) |
0, module; 1, function_definition; 2, function_name:save_controls; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, identifier:parameterstep; 12, None; 13, identifier:simulationstep; 14, None; 15, identifier:auxfiler; 16, None; 17, comment:"""Call method |Elements.save_controls| of the |Elements| object
currently handled by the |HydPy| object.
We use the `LahnH` example project to demonstrate how to write
a complete set of parameter control files. For convenience, we let
function |prepare_full_example_2| prepare a fully functional
|HydPy| object, handling seven |Element| objects controlling
four |hland_v1| and three |hstream_v1| application models:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
At first, there is only one control subfolder named "default",
containing the seven control files used in the step above:
>>> import os
>>> with TestIO():
... os.listdir('LahnH/control')
['default']
Next, we use the |ControlManager| to create a new directory
and dump all control files into it:
>>> with TestIO():
... pub.controlmanager.currentdir = 'newdir'
... hp.save_controls()
... sorted(os.listdir('LahnH/control'))
['default', 'newdir']
We focus our examples on the (smaller) control files of
application model |hstream_v1|. The values of parameter
|hstream_control.Lag| and |hstream_control.Damp| for the
river channel connecting the outlets of subcatchment `lahn_1`
and `lahn_2` are 0.583 days and 0.0, respectively:
>>> model = hp.elements.stream_lahn_1_lahn_2.model
>>> model.parameters.control
lag(0.583)
damp(0.0)
The corresponding written control file defines the same values:
>>> dir_ = 'LahnH/control/newdir/'
>>> with TestIO():
... with open(dir_ + 'stream_lahn_1_lahn_2.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1d')
parameterstep('1d')
<BLANKLINE>
lag(0.583)
damp(0.0)
<BLANKLINE>
Its name equals the element name and the time step information
is taken from the |Timegrid| object available via |pub|:
>>> pub.timegrids.stepsize
Period('1d')
Use the |Auxfiler| class to avoid redefining the same parameter
values in multiple control files. Here, we prepare an |Auxfiler|
object which handles the two parameters of the model discussed
above:
>>> from hydpy import Auxfiler
>>> aux = Auxfiler()
>>> aux += 'hstream_v1'
>>> aux.hstream_v1.stream = model.parameters.control.damp
>>> aux.hstream_v1.stream = model.parameters.control.lag
When passing the |Auxfiler| object to |HydPy.save_controls|,
the control file of element `stream_lahn_1_lahn_2` does not define
the values of the two parameters on its own, but references the
auxiliary file `stream.py` instead:
>>> with TestIO():
... pub.controlmanager.currentdir = 'newdir'
... hp.save_controls(auxfiler=aux)
... with open(dir_ + 'stream_lahn_1_lahn_2.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1d')
parameterstep('1d')
<BLANKLINE>
lag(auxfile='stream')
damp(auxfile='stream')
<BLANKLINE>
`stream.py` contains the actual value definitions:
>>> with TestIO():
... with open(dir_ + 'stream.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1d')
parameterstep('1d')
<BLANKLINE>
damp(0.0)
lag(0.583)
<BLANKLINE>
The |hstream_v1| model of element `stream_lahn_2_lahn_3` defines
the same value for parameter |hstream_control.Damp| but a different
one for parameter |hstream_control.Lag|. Hence, only
|hstream_control.Damp| can reference control file `stream.py`
without distorting data:
>>> with TestIO():
... with open(dir_ + 'stream_lahn_2_lahn_3.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1d')
parameterstep('1d')
<BLANKLINE>
lag(0.417)
damp(auxfile='stream')
<BLANKLINE>
Another option is to pass alternative step size information.
The `simulationstep` information, which is not really required
in control files but useful for testing them, has no impact
on the written data. However, passing an alternative
`parameterstep` information changes the written values of
time dependent parameters both in the primary and the auxiliary
control files, as to be expected:
>>> with TestIO():
... pub.controlmanager.currentdir = 'newdir'
... hp.save_controls(
... auxfiler=aux, parameterstep='2d', simulationstep='1h')
... with open(dir_ + 'stream_lahn_1_lahn_2.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('2d')
<BLANKLINE>
lag(auxfile='stream')
damp(auxfile='stream')
<BLANKLINE>
>>> with TestIO():
... with open(dir_ + 'stream.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('2d')
<BLANKLINE>
damp(0.0)
lag(0.2915)
<BLANKLINE>
>>> with TestIO():
... with open(dir_ + 'stream_lahn_2_lahn_3.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('2d')
<BLANKLINE>
lag(0.2085)
damp(auxfile='stream')
<BLANKLINE>
"""; 18, call; 19, attribute; 20, argument_list; 21, attribute; 22, identifier:save_controls; 23, keyword_argument; 24, keyword_argument; 25, keyword_argument; 26, identifier:self; 27, identifier:elements; 28, identifier:parameterstep; 29, identifier:parameterstep; 30, identifier:simulationstep; 31, identifier:simulationstep; 32, identifier:auxfiler; 33, identifier:auxfiler | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 6, 11; 6, 12; 7, 13; 7, 14; 8, 15; 8, 16; 9, 17; 10, 18; 18, 19; 18, 20; 19, 21; 19, 22; 20, 23; 20, 24; 20, 25; 21, 26; 21, 27; 23, 28; 23, 29; 24, 30; 24, 31; 25, 32; 25, 33 | def save_controls(self, parameterstep=None, simulationstep=None,
auxfiler=None):
"""Call method |Elements.save_controls| of the |Elements| object
currently handled by the |HydPy| object.
We use the `LahnH` example project to demonstrate how to write
a complete set of parameter control files. For convenience, we let
function |prepare_full_example_2| prepare a fully functional
|HydPy| object, handling seven |Element| objects controlling
four |hland_v1| and three |hstream_v1| application models:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
At first, there is only one control subfolder named "default",
containing the seven control files used in the step above:
>>> import os
>>> with TestIO():
... os.listdir('LahnH/control')
['default']
Next, we use the |ControlManager| to create a new directory
and dump all control files into it:
>>> with TestIO():
... pub.controlmanager.currentdir = 'newdir'
... hp.save_controls()
... sorted(os.listdir('LahnH/control'))
['default', 'newdir']
We focus our examples on the (smaller) control files of
application model |hstream_v1|. The values of parameter
|hstream_control.Lag| and |hstream_control.Damp| for the
river channel connecting the outlets of subcatchment `lahn_1`
and `lahn_2` are 0.583 days and 0.0, respectively:
>>> model = hp.elements.stream_lahn_1_lahn_2.model
>>> model.parameters.control
lag(0.583)
damp(0.0)
The corresponding written control file defines the same values:
>>> dir_ = 'LahnH/control/newdir/'
>>> with TestIO():
... with open(dir_ + 'stream_lahn_1_lahn_2.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1d')
parameterstep('1d')
<BLANKLINE>
lag(0.583)
damp(0.0)
<BLANKLINE>
Its name equals the element name and the time step information
is taken from the |Timegrid| object available via |pub|:
>>> pub.timegrids.stepsize
Period('1d')
Use the |Auxfiler| class to avoid redefining the same parameter
values in multiple control files. Here, we prepare an |Auxfiler|
object which handles the two parameters of the model discussed
above:
>>> from hydpy import Auxfiler
>>> aux = Auxfiler()
>>> aux += 'hstream_v1'
>>> aux.hstream_v1.stream = model.parameters.control.damp
>>> aux.hstream_v1.stream = model.parameters.control.lag
When passing the |Auxfiler| object to |HydPy.save_controls|,
the control file of element `stream_lahn_1_lahn_2` does not define
the values of the two parameters on its own, but references the
auxiliary file `stream.py` instead:
>>> with TestIO():
... pub.controlmanager.currentdir = 'newdir'
... hp.save_controls(auxfiler=aux)
... with open(dir_ + 'stream_lahn_1_lahn_2.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1d')
parameterstep('1d')
<BLANKLINE>
lag(auxfile='stream')
damp(auxfile='stream')
<BLANKLINE>
`stream.py` contains the actual value definitions:
>>> with TestIO():
... with open(dir_ + 'stream.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1d')
parameterstep('1d')
<BLANKLINE>
damp(0.0)
lag(0.583)
<BLANKLINE>
The |hstream_v1| model of element `stream_lahn_2_lahn_3` defines
the same value for parameter |hstream_control.Damp| but a different
one for parameter |hstream_control.Lag|. Hence, only
|hstream_control.Damp| can reference control file `stream.py`
without distorting data:
>>> with TestIO():
... with open(dir_ + 'stream_lahn_2_lahn_3.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1d')
parameterstep('1d')
<BLANKLINE>
lag(0.417)
damp(auxfile='stream')
<BLANKLINE>
Another option is to pass alternative step size information.
The `simulationstep` information, which is not really required
in control files but useful for testing them, has no impact
on the written data. However, passing an alternative
`parameterstep` information changes the written values of
time dependent parameters both in the primary and the auxiliary
control files, as to be expected:
>>> with TestIO():
... pub.controlmanager.currentdir = 'newdir'
... hp.save_controls(
... auxfiler=aux, parameterstep='2d', simulationstep='1h')
... with open(dir_ + 'stream_lahn_1_lahn_2.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('2d')
<BLANKLINE>
lag(auxfile='stream')
damp(auxfile='stream')
<BLANKLINE>
>>> with TestIO():
... with open(dir_ + 'stream.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('2d')
<BLANKLINE>
damp(0.0)
lag(0.2915)
<BLANKLINE>
>>> with TestIO():
... with open(dir_ + 'stream_lahn_2_lahn_3.py') as controlfile:
... print(controlfile.read())
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('2d')
<BLANKLINE>
lag(0.2085)
damp(auxfile='stream')
<BLANKLINE>
"""
self.elements.save_controls(parameterstep=parameterstep,
simulationstep=simulationstep,
auxfiler=auxfiler) |
0, module; 1, function_definition; 2, function_name:variables; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, for_statement; 9, return_statement; 10, comment:"""Sorted list of strings summarizing all variables handled by the
|Node| objects"""; 11, assignment; 12, identifier:node; 13, attribute; 14, block; 15, call; 16, identifier:variables; 17, call; 18, identifier:self; 19, identifier:nodes; 20, expression_statement; 21, identifier:sorted; 22, argument_list; 23, identifier:set; 24, argument_list; 25, call; 26, identifier:variables; 27, list; 28, attribute; 29, argument_list; 30, identifier:variables; 31, identifier:add; 32, attribute; 33, identifier:node; 34, identifier:variable | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 6, 10; 7, 11; 8, 12; 8, 13; 8, 14; 9, 15; 11, 16; 11, 17; 13, 18; 13, 19; 14, 20; 15, 21; 15, 22; 17, 23; 17, 24; 20, 25; 22, 26; 24, 27; 25, 28; 25, 29; 28, 30; 28, 31; 29, 32; 32, 33; 32, 34 | def variables(self):
"""Sorted list of strings summarizing all variables handled by the
|Node| objects"""
variables = set([])
for node in self.nodes:
variables.add(node.variable)
return sorted(variables) |
0, module; 1, function_definition; 2, function_name:sort_timeplaceentries; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, identifier:timeentry; 8, identifier:placeentry; 9, generic_type; 10, expression_statement; 11, if_statement; 12, return_statement; 13, identifier:Tuple; 14, type_parameter; 15, comment:"""Return a |tuple| containing the given `timeentry` and `placeentry`
sorted in agreement with the currently selected `timeaxis`.
>>> from hydpy.core.netcdftools import NetCDFVariableBase
>>> from hydpy import make_abc_testable
>>> NCVar = make_abc_testable(NetCDFVariableBase)
>>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1)
>>> ncvar.sort_timeplaceentries('time', 'place')
('place', 'time')
>>> ncvar = NetCDFVariableDeep('test', isolate=False, timeaxis=0)
>>> ncvar.sort_timeplaceentries('time', 'place')
('time', 'place')
"""; 16, attribute; 17, block; 18, expression_list; 19, type; 20, type; 21, identifier:self; 22, identifier:_timeaxis; 23, return_statement; 24, identifier:timeentry; 25, identifier:placeentry; 26, identifier:Any; 27, identifier:Any; 28, expression_list; 29, identifier:placeentry; 30, identifier:timeentry | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 3, 8; 4, 9; 5, 10; 5, 11; 5, 12; 9, 13; 9, 14; 10, 15; 11, 16; 11, 17; 12, 18; 14, 19; 14, 20; 16, 21; 16, 22; 17, 23; 18, 24; 18, 25; 19, 26; 20, 27; 23, 28; 28, 29; 28, 30 | def sort_timeplaceentries(self, timeentry, placeentry) -> Tuple[Any, Any]:
"""Return a |tuple| containing the given `timeentry` and `placeentry`
sorted in agreement with the currently selected `timeaxis`.
>>> from hydpy.core.netcdftools import NetCDFVariableBase
>>> from hydpy import make_abc_testable
>>> NCVar = make_abc_testable(NetCDFVariableBase)
>>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1)
>>> ncvar.sort_timeplaceentries('time', 'place')
('place', 'time')
>>> ncvar = NetCDFVariableDeep('test', isolate=False, timeaxis=0)
>>> ncvar.sort_timeplaceentries('time', 'place')
('time', 'place')
"""
if self._timeaxis:
return placeentry, timeentry
return timeentry, placeentry |
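The practical effect of the `timeaxis` option is easiest to see when using the returned ordering as an array shape. A tiny usage sketch with a plain function stand-in and hypothetical variable names:

import numpy

def sort_entries(timeentry, placeentry, timeaxis):
    if timeaxis:
        return placeentry, timeentry
    return timeentry, placeentry

nmb_timesteps, nmb_places = 4, 3
numpy.zeros(sort_entries(nmb_timesteps, nmb_places, timeaxis=1)).shape   # (3, 4)
numpy.zeros(sort_entries(nmb_timesteps, nmb_places, timeaxis=0)).shape   # (4, 3)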
0, module; 1, function_definition; 2, function_name:prepare_io_example_1; 3, parameters; 4, type; 5, comment:# noinspection PyUnresolvedReferences; 6, block; 7, generic_type; 8, expression_statement; 9, import_from_statement; 10, expression_statement; 11, import_from_statement; 12, expression_statement; 13, with_statement; 14, expression_statement; 15, import_from_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, expression_statement; 20, expression_statement; 21, expression_statement; 22, expression_statement; 23, import_from_statement; 24, expression_statement; 25, expression_statement; 26, expression_statement; 27, import_from_statement; 28, for_statement; 29, with_statement; 30, function_definition; 31, import_statement; 32, expression_statement; 33, for_statement; 34, for_statement; 35, return_statement; 36, identifier:Tuple; 37, type_parameter; 38, comment:"""Prepare an IO example configuration.
>>> from hydpy.core.examples import prepare_io_example_1
>>> nodes, elements = prepare_io_example_1()
(1) Prepares a short initialisation period of five days:
>>> from hydpy import pub
>>> pub.timegrids
Timegrids(Timegrid('2000-01-01 00:00:00',
'2000-01-05 00:00:00',
'1d'))
(2) Prepares a plain IO testing directory structure:
>>> pub.sequencemanager.inputdirpath
'inputpath'
>>> pub.sequencemanager.fluxdirpath
'outputpath'
>>> pub.sequencemanager.statedirpath
'outputpath'
>>> pub.sequencemanager.nodedirpath
'nodepath'
>>> import os
>>> from hydpy import TestIO
>>> with TestIO():
... print(sorted(filename for filename in os.listdir('.')
... if not filename.startswith('_')))
['inputpath', 'nodepath', 'outputpath']
(3) Returns three |Element| objects handling either application model
|lland_v1| or |lland_v2|, and two |Node| objects handling variables
`Q` and `T`:
>>> for element in elements:
... print(element.name, element.model)
element1 lland_v1
element2 lland_v1
element3 lland_v2
>>> for node in nodes:
... print(node.name, node.variable)
node1 Q
node2 T
(4) Prepares the time series data of the input sequence
|lland_inputs.Nied|, flux sequence |lland_fluxes.NKor|, and state
sequence |lland_states.BoWa| for each model instance, and |Sim| for
each node instance (all values are different), e.g.:
>>> nied1 = elements.element1.model.sequences.inputs.nied
>>> nied1.series
InfoArray([ 0., 1., 2., 3.])
>>> nkor1 = elements.element1.model.sequences.fluxes.nkor
>>> nkor1.series
InfoArray([[ 12.],
[ 13.],
[ 14.],
[ 15.]])
>>> bowa3 = elements.element3.model.sequences.states.bowa
>>> bowa3.series
InfoArray([[ 48., 49., 50.],
[ 51., 52., 53.],
[ 54., 55., 56.],
[ 57., 58., 59.]])
>>> sim2 = nodes.node2.sequences.sim
>>> sim2.series
InfoArray([ 64., 65., 66., 67.])
(5) All sequences carry |numpy.ndarray| objects with (deep) copies
of the time series data for testing:
>>> import numpy
>>> (numpy.all(nied1.series == nied1.testarray) and
... numpy.all(nkor1.series == nkor1.testarray) and
... numpy.all(bowa3.series == bowa3.testarray) and
... numpy.all(sim2.series == sim2.testarray))
InfoArray(True, dtype=bool)
>>> bowa3.series[1, 2] = -999.0
>>> numpy.all(bowa3.series == bowa3.testarray)
InfoArray(False, dtype=bool)
"""; 39, dotted_name; 40, dotted_name; 41, call; 42, dotted_name; 43, dotted_name; 44, assignment; 45, with_clause; 46, block; 47, assignment; 48, dotted_name; 49, dotted_name; 50, dotted_name; 51, dotted_name; 52, dotted_name; 53, dotted_name; 54, assignment; 55, assignment; 56, assignment; 57, assignment; 58, assignment; 59, assignment; 60, assignment; 61, dotted_name; 62, dotted_name; 63, dotted_name; 64, assignment; 65, assignment; 66, assignment; 67, dotted_name; 68, dotted_name; 69, pattern_list; 70, call; 71, block; 72, with_clause; 73, block; 74, function_name:init_values; 75, parameters; 76, block; 77, dotted_name; 78, assignment; 79, pattern_list; 80, call; 81, block; 82, identifier:node; 83, identifier:nodes; 84, block; 85, expression_list; 86, type; 87, type; 88, identifier:hydpy; 89, identifier:TestIO; 90, attribute; 91, argument_list; 92, identifier:hydpy; 93, identifier:core; 94, identifier:filetools; 95, identifier:SequenceManager; 96, attribute; 97, call; 98, with_item; 99, expression_statement; 100, expression_statement; 101, expression_statement; 102, expression_statement; 103, attribute; 104, expression_list; 105, identifier:hydpy; 106, identifier:Node; 107, identifier:Nodes; 108, identifier:Element; 109, identifier:Elements; 110, identifier:prepare_model; 111, identifier:node1; 112, call; 113, identifier:node2; 114, call; 115, identifier:nodes; 116, call; 117, identifier:element1; 118, call; 119, identifier:element2; 120, call; 121, identifier:element3; 122, call; 123, identifier:elements; 124, call; 125, identifier:hydpy; 126, identifier:models; 127, identifier:lland_v1; 128, identifier:lland_v2; 129, attribute; 130, call; 131, attribute; 132, call; 133, attribute; 134, call; 135, identifier:hydpy; 136, identifier:models; 137, identifier:lland; 138, identifier:ACKER; 139, identifier:idx; 140, identifier:element; 141, identifier:enumerate; 142, argument_list; 143, expression_statement; 144, expression_statement; 145, expression_statement; 146, expression_statement; 147, with_item; 148, expression_statement; 149, expression_statement; 150, expression_statement; 151, expression_statement; 152, identifier:seq; 153, identifier:value1_; 154, expression_statement; 155, expression_statement; 156, expression_statement; 157, expression_statement; 158, return_statement; 159, identifier:numpy; 160, identifier:value1; 161, integer:0; 162, identifier:subname; 163, identifier:seqname; 164, identifier:zip; 165, argument_list; 166, for_statement; 167, expression_statement; 168, identifier:nodes; 169, identifier:elements; 170, attribute; 171, attribute; 172, identifier:TestIO; 173, identifier:clear; 174, attribute; 175, identifier:sequencemanager; 176, identifier:SequenceManager; 177, argument_list; 178, call; 179, assignment; 180, assignment; 181, assignment; 182, assignment; 183, attribute; 184, identifier:timegrids; 185, string; 186, string; 187, string; 188, identifier:Node; 189, argument_list; 190, identifier:Node; 191, argument_list; 192, identifier:Nodes; 193, argument_list; 194, identifier:Element; 195, argument_list; 196, identifier:Element; 197, argument_list; 198, identifier:Element; 199, argument_list; 200, identifier:Elements; 201, argument_list; 202, identifier:element1; 203, identifier:model; 204, identifier:prepare_model; 205, argument_list; 206, identifier:element2; 207, identifier:model; 208, identifier:prepare_model; 209, argument_list; 210, identifier:element3; 211, identifier:model; 212, identifier:prepare_model; 213, argument_list; 214, identifier:elements; 215, 
assignment; 216, call; 217, call; 218, call; 219, call; 220, call; 221, call; 222, call; 223, call; 224, assignment; 225, assignment; 226, assignment; 227, assignment; 228, identifier:value2_; 229, list; 230, list; 231, identifier:element; 232, identifier:elements; 233, block; 234, assignment; 235, identifier:devicetools; 236, identifier:Nodes; 237, identifier:devicetools; 238, identifier:Elements; 239, identifier:hydpy; 240, identifier:pub; 241, identifier:TestIO; 242, argument_list; 243, attribute; 244, string; 245, attribute; 246, string; 247, attribute; 248, string; 249, attribute; 250, string; 251, identifier:hydpy; 252, identifier:pub; 253, string_content:2000-01-01; 254, string_content:2000-01-05; 255, string_content:1d; 256, string; 257, string; 258, keyword_argument; 259, identifier:node1; 260, identifier:node2; 261, string; 262, keyword_argument; 263, string; 264, keyword_argument; 265, string; 266, keyword_argument; 267, identifier:element1; 268, identifier:element2; 269, identifier:element3; 270, identifier:lland_v1; 271, identifier:lland_v1; 272, identifier:lland_v2; 273, identifier:parameters; 274, attribute; 275, attribute; 276, argument_list; 277, attribute; 278, argument_list; 279, attribute; 280, argument_list; 281, attribute; 282, argument_list; 283, attribute; 284, argument_list; 285, attribute; 286, argument_list; 287, attribute; 288, argument_list; 289, attribute; 290, argument_list; 291, identifier:value2_; 292, binary_operator:value1_ + len(seq.series.flatten()); 293, identifier:values_; 294, call; 295, attribute; 296, call; 297, attribute; 298, call; 299, string; 300, string; 301, string; 302, string; 303, string; 304, string; 305, expression_statement; 306, expression_statement; 307, identifier:value1; 308, call; 309, attribute; 310, identifier:inputdirpath; 311, string_content:inputpath; 312, attribute; 313, identifier:fluxdirpath; 314, string_content:outputpath; 315, attribute; 316, identifier:statedirpath; 317, string_content:outputpath; 318, attribute; 319, identifier:nodedirpath; 320, string_content:nodepath; 321, string_content:node1; 322, string_content:node2; 323, identifier:variable; 324, string; 325, string_content:element1; 326, identifier:outlets; 327, identifier:node1; 328, string_content:element2; 329, identifier:outlets; 330, identifier:node1; 331, string_content:element3; 332, identifier:outlets; 333, identifier:node1; 334, attribute; 335, identifier:parameters; 336, attribute; 337, identifier:nhru; 338, binary_operator:idx+1; 339, attribute; 340, identifier:lnk; 341, identifier:ACKER; 342, attribute; 343, identifier:absfhru; 344, float:10.0; 345, attribute; 346, identifier:printprogress; 347, False; 348, identifier:nodes; 349, identifier:prepare_simseries; 350, identifier:elements; 351, identifier:prepare_inputseries; 352, identifier:elements; 353, identifier:prepare_fluxseries; 354, identifier:elements; 355, identifier:prepare_stateseries; 356, identifier:value1_; 357, call; 358, attribute; 359, argument_list; 360, identifier:seq; 361, identifier:testarray; 362, attribute; 363, argument_list; 364, identifier:seq; 365, identifier:series; 366, attribute; 367, argument_list; 368, string_content:inputs; 369, string_content:fluxes; 370, string_content:states; 371, string_content:nied; 372, string_content:nkor; 373, string_content:bowa; 374, assignment; 375, assignment; 376, identifier:init_values; 377, argument_list; 378, attribute; 379, identifier:sequencemanager; 380, attribute; 381, identifier:sequencemanager; 382, attribute; 383, 
identifier:sequencemanager; 384, attribute; 385, identifier:sequencemanager; 386, string_content:T; 387, identifier:element; 388, identifier:model; 389, identifier:parameters; 390, identifier:control; 391, identifier:idx; 392, integer:1; 393, identifier:parameters; 394, identifier:control; 395, identifier:parameters; 396, identifier:derived; 397, attribute; 398, identifier:options; 399, identifier:len; 400, argument_list; 401, identifier:numpy; 402, identifier:arange; 403, identifier:value1_; 404, identifier:value2_; 405, keyword_argument; 406, identifier:values_; 407, identifier:reshape; 408, attribute; 409, attribute; 410, identifier:copy; 411, identifier:subseqs; 412, call; 413, identifier:value1; 414, call; 415, attribute; 416, identifier:value1; 417, identifier:hydpy; 418, identifier:pub; 419, identifier:hydpy; 420, identifier:pub; 421, identifier:hydpy; 422, identifier:pub; 423, identifier:hydpy; 424, identifier:pub; 425, identifier:hydpy; 426, identifier:pub; 427, call; 428, identifier:dtype; 429, identifier:float; 430, identifier:seq; 431, identifier:seriesshape; 432, identifier:seq; 433, identifier:testarray; 434, identifier:getattr; 435, argument_list; 436, identifier:init_values; 437, argument_list; 438, attribute; 439, identifier:sim; 440, attribute; 441, argument_list; 442, attribute; 443, identifier:subname; 444, call; 445, identifier:value1; 446, identifier:node; 447, identifier:sequences; 448, attribute; 449, identifier:flatten; 450, attribute; 451, identifier:sequences; 452, identifier:getattr; 453, argument_list; 454, identifier:seq; 455, identifier:series; 456, identifier:element; 457, identifier:model; 458, identifier:subseqs; 459, identifier:seqname | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 1, 6; 4, 7; 6, 8; 6, 9; 6, 10; 6, 11; 6, 12; 6, 13; 6, 14; 6, 15; 6, 16; 6, 17; 6, 18; 6, 19; 6, 20; 6, 21; 6, 22; 6, 23; 6, 24; 6, 25; 6, 26; 6, 27; 6, 28; 6, 29; 6, 30; 6, 31; 6, 32; 6, 33; 6, 34; 6, 35; 7, 36; 7, 37; 8, 38; 9, 39; 9, 40; 10, 41; 11, 42; 11, 43; 12, 44; 13, 45; 13, 46; 14, 47; 15, 48; 15, 49; 15, 50; 15, 51; 15, 52; 15, 53; 16, 54; 17, 55; 18, 56; 19, 57; 20, 58; 21, 59; 22, 60; 23, 61; 23, 62; 23, 63; 24, 64; 25, 65; 26, 66; 27, 67; 27, 68; 28, 69; 28, 70; 28, 71; 29, 72; 29, 73; 30, 74; 30, 75; 30, 76; 31, 77; 32, 78; 33, 79; 33, 80; 33, 81; 34, 82; 34, 83; 34, 84; 35, 85; 37, 86; 37, 87; 39, 88; 40, 89; 41, 90; 41, 91; 42, 92; 42, 93; 42, 94; 43, 95; 44, 96; 44, 97; 45, 98; 46, 99; 46, 100; 46, 101; 46, 102; 47, 103; 47, 104; 48, 105; 49, 106; 50, 107; 51, 108; 52, 109; 53, 110; 54, 111; 54, 112; 55, 113; 55, 114; 56, 115; 56, 116; 57, 117; 57, 118; 58, 119; 58, 120; 59, 121; 59, 122; 60, 123; 60, 124; 61, 125; 61, 126; 62, 127; 63, 128; 64, 129; 64, 130; 65, 131; 65, 132; 66, 133; 66, 134; 67, 135; 67, 136; 67, 137; 68, 138; 69, 139; 69, 140; 70, 141; 70, 142; 71, 143; 71, 144; 71, 145; 71, 146; 72, 147; 73, 148; 73, 149; 73, 150; 73, 151; 75, 152; 75, 153; 76, 154; 76, 155; 76, 156; 76, 157; 76, 158; 77, 159; 78, 160; 78, 161; 79, 162; 79, 163; 80, 164; 80, 165; 81, 166; 84, 167; 85, 168; 85, 169; 86, 170; 87, 171; 90, 172; 90, 173; 96, 174; 96, 175; 97, 176; 97, 177; 98, 178; 99, 179; 100, 180; 101, 181; 102, 182; 103, 183; 103, 184; 104, 185; 104, 186; 104, 187; 112, 188; 112, 189; 114, 190; 114, 191; 116, 192; 116, 193; 118, 194; 118, 195; 120, 196; 120, 197; 122, 198; 122, 199; 124, 200; 124, 201; 129, 202; 129, 203; 130, 204; 130, 205; 131, 206; 131, 207; 132, 208; 132, 209; 133, 210; 133, 211; 134, 212; 134, 213; 142, 214; 143, 215; 144, 216; 145, 217; 146, 218; 
147, 219; 148, 220; 149, 221; 150, 222; 151, 223; 154, 224; 155, 225; 156, 226; 157, 227; 158, 228; 165, 229; 165, 230; 166, 231; 166, 232; 166, 233; 167, 234; 170, 235; 170, 236; 171, 237; 171, 238; 174, 239; 174, 240; 178, 241; 178, 242; 179, 243; 179, 244; 180, 245; 180, 246; 181, 247; 181, 248; 182, 249; 182, 250; 183, 251; 183, 252; 185, 253; 186, 254; 187, 255; 189, 256; 191, 257; 191, 258; 193, 259; 193, 260; 195, 261; 195, 262; 197, 263; 197, 264; 199, 265; 199, 266; 201, 267; 201, 268; 201, 269; 205, 270; 209, 271; 213, 272; 215, 273; 215, 274; 216, 275; 216, 276; 217, 277; 217, 278; 218, 279; 218, 280; 219, 281; 219, 282; 220, 283; 220, 284; 221, 285; 221, 286; 222, 287; 222, 288; 223, 289; 223, 290; 224, 291; 224, 292; 225, 293; 225, 294; 226, 295; 226, 296; 227, 297; 227, 298; 229, 299; 229, 300; 229, 301; 230, 302; 230, 303; 230, 304; 233, 305; 233, 306; 234, 307; 234, 308; 243, 309; 243, 310; 244, 311; 245, 312; 245, 313; 246, 314; 247, 315; 247, 316; 248, 317; 249, 318; 249, 319; 250, 320; 256, 321; 257, 322; 258, 323; 258, 324; 261, 325; 262, 326; 262, 327; 263, 328; 264, 329; 264, 330; 265, 331; 266, 332; 266, 333; 274, 334; 274, 335; 275, 336; 275, 337; 276, 338; 277, 339; 277, 340; 278, 341; 279, 342; 279, 343; 280, 344; 281, 345; 281, 346; 282, 347; 283, 348; 283, 349; 285, 350; 285, 351; 287, 352; 287, 353; 289, 354; 289, 355; 292, 356; 292, 357; 294, 358; 294, 359; 295, 360; 295, 361; 296, 362; 296, 363; 297, 364; 297, 365; 298, 366; 298, 367; 299, 368; 300, 369; 301, 370; 302, 371; 303, 372; 304, 373; 305, 374; 306, 375; 308, 376; 308, 377; 309, 378; 309, 379; 312, 380; 312, 381; 315, 382; 315, 383; 318, 384; 318, 385; 324, 386; 334, 387; 334, 388; 336, 389; 336, 390; 338, 391; 338, 392; 339, 393; 339, 394; 342, 395; 342, 396; 345, 397; 345, 398; 357, 399; 357, 400; 358, 401; 358, 402; 359, 403; 359, 404; 359, 405; 362, 406; 362, 407; 363, 408; 366, 409; 366, 410; 374, 411; 374, 412; 375, 413; 375, 414; 377, 415; 377, 416; 378, 417; 378, 418; 380, 419; 380, 420; 382, 421; 382, 422; 384, 423; 384, 424; 397, 425; 397, 426; 400, 427; 405, 428; 405, 429; 408, 430; 408, 431; 409, 432; 409, 433; 412, 434; 412, 435; 414, 436; 414, 437; 415, 438; 415, 439; 427, 440; 427, 441; 435, 442; 435, 443; 437, 444; 437, 445; 438, 446; 438, 447; 440, 448; 440, 449; 442, 450; 442, 451; 444, 452; 444, 453; 448, 454; 448, 455; 450, 456; 450, 457; 453, 458; 453, 459 | def prepare_io_example_1() -> Tuple[devicetools.Nodes, devicetools.Elements]:
# noinspection PyUnresolvedReferences
"""Prepare an IO example configuration.
>>> from hydpy.core.examples import prepare_io_example_1
>>> nodes, elements = prepare_io_example_1()
(1) Prepares a short initialisation period of five days:
>>> from hydpy import pub
>>> pub.timegrids
Timegrids(Timegrid('2000-01-01 00:00:00',
'2000-01-05 00:00:00',
'1d'))
(2) Prepares a plain IO testing directory structure:
>>> pub.sequencemanager.inputdirpath
'inputpath'
>>> pub.sequencemanager.fluxdirpath
'outputpath'
>>> pub.sequencemanager.statedirpath
'outputpath'
>>> pub.sequencemanager.nodedirpath
'nodepath'
>>> import os
>>> from hydpy import TestIO
>>> with TestIO():
... print(sorted(filename for filename in os.listdir('.')
... if not filename.startswith('_')))
['inputpath', 'nodepath', 'outputpath']
(3) Returns three |Element| objects handling either application model
|lland_v1| or |lland_v2|, and two |Node| objects handling variables
`Q` and `T`:
>>> for element in elements:
... print(element.name, element.model)
element1 lland_v1
element2 lland_v1
element3 lland_v2
>>> for node in nodes:
... print(node.name, node.variable)
node1 Q
node2 T
(4) Prepares the time series data of the input sequence
|lland_inputs.Nied|, flux sequence |lland_fluxes.NKor|, and state
sequence |lland_states.BoWa| for each model instance, and |Sim| for
each node instance (all values are different), e.g.:
>>> nied1 = elements.element1.model.sequences.inputs.nied
>>> nied1.series
InfoArray([ 0., 1., 2., 3.])
>>> nkor1 = elements.element1.model.sequences.fluxes.nkor
>>> nkor1.series
InfoArray([[ 12.],
[ 13.],
[ 14.],
[ 15.]])
>>> bowa3 = elements.element3.model.sequences.states.bowa
>>> bowa3.series
InfoArray([[ 48., 49., 50.],
[ 51., 52., 53.],
[ 54., 55., 56.],
[ 57., 58., 59.]])
>>> sim2 = nodes.node2.sequences.sim
>>> sim2.series
InfoArray([ 64., 65., 66., 67.])
(5) All sequences carry |numpy.ndarray| objects with (deep) copies
of the time series data for testing:
>>> import numpy
>>> (numpy.all(nied1.series == nied1.testarray) and
... numpy.all(nkor1.series == nkor1.testarray) and
... numpy.all(bowa3.series == bowa3.testarray) and
... numpy.all(sim2.series == sim2.testarray))
InfoArray(True, dtype=bool)
>>> bowa3.series[1, 2] = -999.0
>>> numpy.all(bowa3.series == bowa3.testarray)
InfoArray(False, dtype=bool)
"""
from hydpy import TestIO
TestIO.clear()
from hydpy.core.filetools import SequenceManager
hydpy.pub.sequencemanager = SequenceManager()
with TestIO():
hydpy.pub.sequencemanager.inputdirpath = 'inputpath'
hydpy.pub.sequencemanager.fluxdirpath = 'outputpath'
hydpy.pub.sequencemanager.statedirpath = 'outputpath'
hydpy.pub.sequencemanager.nodedirpath = 'nodepath'
hydpy.pub.timegrids = '2000-01-01', '2000-01-05', '1d'
from hydpy import Node, Nodes, Element, Elements, prepare_model
node1 = Node('node1')
node2 = Node('node2', variable='T')
nodes = Nodes(node1, node2)
element1 = Element('element1', outlets=node1)
element2 = Element('element2', outlets=node1)
element3 = Element('element3', outlets=node1)
elements = Elements(element1, element2, element3)
from hydpy.models import lland_v1, lland_v2
element1.model = prepare_model(lland_v1)
element2.model = prepare_model(lland_v1)
element3.model = prepare_model(lland_v2)
from hydpy.models.lland import ACKER
for idx, element in enumerate(elements):
parameters = element.model.parameters
parameters.control.nhru(idx+1)
parameters.control.lnk(ACKER)
parameters.derived.absfhru(10.0)
with hydpy.pub.options.printprogress(False):
nodes.prepare_simseries()
elements.prepare_inputseries()
elements.prepare_fluxseries()
elements.prepare_stateseries()
def init_values(seq, value1_):
value2_ = value1_ + len(seq.series.flatten())
values_ = numpy.arange(value1_, value2_, dtype=float)
seq.testarray = values_.reshape(seq.seriesshape)
seq.series = seq.testarray.copy()
return value2_
import numpy
value1 = 0
for subname, seqname in zip(['inputs', 'fluxes', 'states'],
['nied', 'nkor', 'bowa']):
for element in elements:
subseqs = getattr(element.model.sequences, subname)
value1 = init_values(getattr(subseqs, seqname), value1)
for node in nodes:
value1 = init_values(node.sequences.sim, value1)
return nodes, elements |
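The init_values helper above fills each time series with consecutive numbers and keeps an independent copy for the later comparisons shown in the doctest. A minimal standalone sketch of that pattern, with a made-up array shape (fill_consecutive is not part of HydPy):
import numpy

def fill_consecutive(shape, start):
    # Fill an array of the given shape with consecutive floats and return
    # both the array and the next unused value, mirroring init_values above.
    stop = start + int(numpy.prod(shape))
    values = numpy.arange(start, stop, dtype=float).reshape(shape)
    return values, stop

series, nxt = fill_consecutive((4, 3), 0)
testarray = series.copy()              # independent copy kept for comparison
series[1, 2] = -999.0
print(numpy.all(series == testarray))  # False, as in the doctest above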
0, module; 1, function_definition; 2, function_name:comparison_table; 3, parameters; 4, comment:# pragma: no cover; 5, block; 6, identifier:self; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, expression_statement; 16, if_statement; 17, if_statement; 18, if_statement; 19, if_statement; 20, if_statement; 21, expression_statement; 22, expression_statement; 23, expression_statement; 24, expression_statement; 25, expression_statement; 26, expression_statement; 27, if_statement; 28, expression_statement; 29, if_statement; 30, if_statement; 31, if_statement; 32, if_statement; 33, if_statement; 34, expression_statement; 35, expression_statement; 36, expression_statement; 37, expression_statement; 38, expression_statement; 39, expression_statement; 40, if_statement; 41, for_statement; 42, if_statement; 43, return_statement; 44, identifier:caption; 45, None; 46, identifier:label; 47, string:"tab:model_comp"; 48, identifier:hlines; 49, True; 50, identifier:aic; 51, True; 52, identifier:bic; 53, True; 54, identifier:dic; 55, True; 56, identifier:sort; 57, string:"bic"; 58, identifier:descending; 59, True; 60, comment:"""
Return a LaTeX ready table of model comparisons.
Parameters
----------
caption : str, optional
The table caption to insert.
label : str, optional
The table label to insert.
hlines : bool, optional
Whether to insert hlines in the table or not.
aic : bool, optional
Whether to include a column for AICc or not.
bic : bool, optional
Whether to include a column for BIC or not.
dic : bool, optional
Whether to include a column for DIC or not.
sort : str, optional
How to sort the models. Should be one of "bic", "aic" or "dic".
descending : bool, optional
The sort order.
Returns
-------
str
A LaTeX table to be copied into your document.
"""; 61, comparison_operator:sort == "bic"; 62, block; 63, comparison_operator:sort == "aic"; 64, block; 65, comparison_operator:sort == "dic"; 66, block; 67, comparison_operator:caption is None; 68, block; 69, comparison_operator:label is None; 70, block; 71, assignment; 72, assignment; 73, assignment; 74, assignment; 75, assignment; 76, assignment; 77, identifier:hlines; 78, block; 79, augmented_assignment; 80, identifier:hlines; 81, block; 82, identifier:aic; 83, block; 84, else_clause; 85, identifier:bic; 86, block; 87, else_clause; 88, identifier:dic; 89, block; 90, else_clause; 91, comparison_operator:sort == "bic"; 92, block; 93, elif_clause; 94, elif_clause; 95, else_clause; 96, assignment; 97, assignment; 98, assignment; 99, assignment; 100, assignment; 101, assignment; 102, identifier:descending; 103, block; 104, identifier:i; 105, identifier:indexes; 106, block; 107, identifier:hlines; 108, block; 109, binary_operator:base_string % (column_text, center_text); 110, identifier:sort; 111, string:"bic"; 112, assert_statement; 113, identifier:sort; 114, string:"aic"; 115, assert_statement; 116, identifier:sort; 117, string:"dic"; 118, assert_statement; 119, identifier:caption; 120, None; 121, expression_statement; 122, identifier:label; 123, None; 124, expression_statement; 125, identifier:base_string; 126, call; 127, identifier:end_text; 128, string:" \\\\ \n"; 129, identifier:num_cols; 130, binary_operator:1 + (1 if aic else 0) + (1 if bic else 0); 131, identifier:column_text; 132, binary_operator:"c" * (num_cols + 1); 133, identifier:center_text; 134, string:""; 135, identifier:hline_text; 136, string:"\\hline\n"; 137, expression_statement; 138, identifier:center_text; 139, binary_operator:"\tModel" + (" & AIC" if aic else "") + (" & BIC " if bic else "") \
+ (" & DIC " if dic else "") + end_text; 140, expression_statement; 141, expression_statement; 142, block; 143, expression_statement; 144, block; 145, expression_statement; 146, block; 147, identifier:sort; 148, string:"bic"; 149, expression_statement; 150, comparison_operator:sort == "aic"; 151, block; 152, comparison_operator:sort == "dic"; 153, block; 154, block; 155, identifier:good; 156, list_comprehension; 157, identifier:names; 158, list_comprehension; 159, identifier:aics; 160, list_comprehension; 161, identifier:bics; 162, list_comprehension; 163, identifier:to_sort; 164, conditional_expression:bics if sort == "bic" else aics; 165, identifier:indexes; 166, call; 167, expression_statement; 168, expression_statement; 169, if_statement; 170, if_statement; 171, if_statement; 172, expression_statement; 173, expression_statement; 174, expression_statement; 175, identifier:base_string; 176, tuple; 177, identifier:bic; 178, string:"You cannot sort by BIC if you turn it off"; 179, identifier:aic; 180, string:"You cannot sort by AIC if you turn it off"; 181, identifier:dic; 182, string:"You cannot sort by DIC if you turn it off"; 183, assignment; 184, assignment; 185, identifier:get_latex_table_frame; 186, argument_list; 187, binary_operator:1 + (1 if aic else 0); 188, parenthesized_expression; 189, string:"c"; 190, parenthesized_expression; 191, augmented_assignment; 192, binary_operator:"\tModel" + (" & AIC" if aic else "") + (" & BIC " if bic else "") \
+ (" & DIC " if dic else ""); 193, identifier:end_text; 194, augmented_assignment; 195, assignment; 196, expression_statement; 197, assignment; 198, expression_statement; 199, assignment; 200, expression_statement; 201, assignment; 202, identifier:sort; 203, string:"aic"; 204, expression_statement; 205, identifier:sort; 206, string:"dic"; 207, expression_statement; 208, raise_statement; 209, identifier:i; 210, for_in_clause; 211, if_clause; 212, attribute; 213, for_in_clause; 214, subscript; 215, for_in_clause; 216, subscript; 217, for_in_clause; 218, identifier:bics; 219, comparison_operator:sort == "bic"; 220, identifier:aics; 221, attribute; 222, argument_list; 223, assignment; 224, assignment; 225, identifier:aic; 226, block; 227, identifier:bic; 228, block; 229, identifier:dic; 230, block; 231, augmented_assignment; 232, augmented_assignment; 233, augmented_assignment; 234, identifier:column_text; 235, identifier:center_text; 236, identifier:caption; 237, string:""; 238, identifier:label; 239, string:""; 240, identifier:caption; 241, identifier:label; 242, integer:1; 243, parenthesized_expression; 244, conditional_expression:1 if bic else 0; 245, binary_operator:num_cols + 1; 246, identifier:center_text; 247, identifier:hline_text; 248, binary_operator:"\tModel" + (" & AIC" if aic else "") + (" & BIC " if bic else ""); 249, line_continuation:\; 250, parenthesized_expression; 251, identifier:center_text; 252, binary_operator:"\t" + hline_text; 253, identifier:aics; 254, call; 255, assignment; 256, identifier:bics; 257, call; 258, assignment; 259, identifier:dics; 260, call; 261, assignment; 262, identifier:to_sort; 263, identifier:bics; 264, assignment; 265, assignment; 266, call; 267, pattern_list; 268, call; 269, comparison_operator:t is not None; 270, subscript; 271, identifier:name; 272, identifier:g; 273, identifier:good; 274, identifier:aics; 275, identifier:g; 276, identifier:g; 277, identifier:good; 278, identifier:bics; 279, identifier:g; 280, identifier:g; 281, identifier:good; 282, identifier:sort; 283, string:"bic"; 284, identifier:np; 285, identifier:argsort; 286, identifier:to_sort; 287, identifier:indexes; 288, subscript; 289, identifier:line; 290, binary_operator:"\t" + names[i]; 291, expression_statement; 292, expression_statement; 293, expression_statement; 294, identifier:line; 295, identifier:end_text; 296, identifier:center_text; 297, identifier:line; 298, identifier:center_text; 299, binary_operator:"\t" + hline_text; 300, conditional_expression:1 if aic else 0; 301, integer:1; 302, identifier:bic; 303, integer:0; 304, identifier:num_cols; 305, integer:1; 306, binary_operator:"\tModel" + (" & AIC" if aic else ""); 307, parenthesized_expression; 308, conditional_expression:" & DIC " if dic else ""; 309, string:"\t"; 310, identifier:hline_text; 311, attribute; 312, argument_list; 313, identifier:aics; 314, call; 315, attribute; 316, argument_list; 317, identifier:bics; 318, call; 319, attribute; 320, argument_list; 321, identifier:dics; 322, call; 323, identifier:to_sort; 324, identifier:aics; 325, identifier:to_sort; 326, identifier:dics; 327, identifier:ValueError; 328, argument_list; 329, identifier:i; 330, identifier:t; 331, identifier:enumerate; 332, argument_list; 333, identifier:t; 334, None; 335, attribute; 336, identifier:g; 337, identifier:indexes; 338, slice; 339, string:"\t"; 340, subscript; 341, augmented_assignment; 342, augmented_assignment; 343, augmented_assignment; 344, string:"\t"; 345, identifier:hline_text; 346, integer:1; 347, identifier:aic; 
348, integer:0; 349, string:"\tModel"; 350, parenthesized_expression; 351, conditional_expression:" & BIC " if bic else ""; 352, string:" & DIC "; 353, identifier:dic; 354, string:""; 355, identifier:self; 356, identifier:aic; 357, attribute; 358, argument_list; 359, identifier:self; 360, identifier:bic; 361, attribute; 362, argument_list; 363, identifier:self; 364, identifier:dic; 365, attribute; 366, argument_list; 367, binary_operator:"sort %s not recognised, must be dic, aic or dic" % sort; 368, identifier:to_sort; 369, attribute; 370, identifier:chains; 371, unary_operator; 372, identifier:names; 373, identifier:i; 374, identifier:line; 375, binary_operator:" & %5.1f " % aics[i]; 376, identifier:line; 377, binary_operator:" & %5.1f " % bics[i]; 378, identifier:line; 379, binary_operator:" & %5.1f " % dics[i]; 380, conditional_expression:" & AIC" if aic else ""; 381, string:" & BIC "; 382, identifier:bic; 383, string:""; 384, identifier:np; 385, identifier:zeros; 386, call; 387, identifier:np; 388, identifier:zeros; 389, call; 390, identifier:np; 391, identifier:zeros; 392, call; 393, string:"sort %s not recognised, must be dic, aic or dic"; 394, identifier:sort; 395, identifier:self; 396, identifier:parent; 397, integer:1; 398, string:" & %5.1f "; 399, subscript; 400, string:" & %5.1f "; 401, subscript; 402, string:" & %5.1f "; 403, subscript; 404, string:" & AIC"; 405, identifier:aic; 406, string:""; 407, identifier:len; 408, argument_list; 409, identifier:len; 410, argument_list; 411, identifier:len; 412, argument_list; 413, identifier:aics; 414, identifier:i; 415, identifier:bics; 416, identifier:i; 417, identifier:dics; 418, identifier:i; 419, attribute; 420, attribute; 421, attribute; 422, attribute; 423, identifier:chains; 424, attribute; 425, identifier:chains; 426, attribute; 427, identifier:chains; 428, identifier:self; 429, identifier:parent; 430, identifier:self; 431, identifier:parent; 432, identifier:self; 433, identifier:parent | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 5, 15; 5, 16; 5, 17; 5, 18; 5, 19; 5, 20; 5, 21; 5, 22; 5, 23; 5, 24; 5, 25; 5, 26; 5, 27; 5, 28; 5, 29; 5, 30; 5, 31; 5, 32; 5, 33; 5, 34; 5, 35; 5, 36; 5, 37; 5, 38; 5, 39; 5, 40; 5, 41; 5, 42; 5, 43; 7, 44; 7, 45; 8, 46; 8, 47; 9, 48; 9, 49; 10, 50; 10, 51; 11, 52; 11, 53; 12, 54; 12, 55; 13, 56; 13, 57; 14, 58; 14, 59; 15, 60; 16, 61; 16, 62; 17, 63; 17, 64; 18, 65; 18, 66; 19, 67; 19, 68; 20, 69; 20, 70; 21, 71; 22, 72; 23, 73; 24, 74; 25, 75; 26, 76; 27, 77; 27, 78; 28, 79; 29, 80; 29, 81; 30, 82; 30, 83; 30, 84; 31, 85; 31, 86; 31, 87; 32, 88; 32, 89; 32, 90; 33, 91; 33, 92; 33, 93; 33, 94; 33, 95; 34, 96; 35, 97; 36, 98; 37, 99; 38, 100; 39, 101; 40, 102; 40, 103; 41, 104; 41, 105; 41, 106; 42, 107; 42, 108; 43, 109; 61, 110; 61, 111; 62, 112; 63, 113; 63, 114; 64, 115; 65, 116; 65, 117; 66, 118; 67, 119; 67, 120; 68, 121; 69, 122; 69, 123; 70, 124; 71, 125; 71, 126; 72, 127; 72, 128; 73, 129; 73, 130; 74, 131; 74, 132; 75, 133; 75, 134; 76, 135; 76, 136; 78, 137; 79, 138; 79, 139; 81, 140; 83, 141; 84, 142; 86, 143; 87, 144; 89, 145; 90, 146; 91, 147; 91, 148; 92, 149; 93, 150; 93, 151; 94, 152; 94, 153; 95, 154; 96, 155; 96, 156; 97, 157; 97, 158; 98, 159; 98, 160; 99, 161; 99, 162; 100, 163; 100, 164; 101, 165; 101, 166; 103, 167; 106, 168; 106, 169; 106, 170; 106, 171; 106, 172; 106, 173; 108, 174; 109, 175; 109, 176; 112, 177; 112, 178; 115, 179; 115, 180; 118, 181; 118, 182; 121, 183; 124, 184; 126, 185; 126, 186; 130, 187; 130, 188; 132, 189; 
132, 190; 137, 191; 139, 192; 139, 193; 140, 194; 141, 195; 142, 196; 143, 197; 144, 198; 145, 199; 146, 200; 149, 201; 150, 202; 150, 203; 151, 204; 152, 205; 152, 206; 153, 207; 154, 208; 156, 209; 156, 210; 156, 211; 158, 212; 158, 213; 160, 214; 160, 215; 162, 216; 162, 217; 164, 218; 164, 219; 164, 220; 166, 221; 166, 222; 167, 223; 168, 224; 169, 225; 169, 226; 170, 227; 170, 228; 171, 229; 171, 230; 172, 231; 173, 232; 174, 233; 176, 234; 176, 235; 183, 236; 183, 237; 184, 238; 184, 239; 186, 240; 186, 241; 187, 242; 187, 243; 188, 244; 190, 245; 191, 246; 191, 247; 192, 248; 192, 249; 192, 250; 194, 251; 194, 252; 195, 253; 195, 254; 196, 255; 197, 256; 197, 257; 198, 258; 199, 259; 199, 260; 200, 261; 201, 262; 201, 263; 204, 264; 207, 265; 208, 266; 210, 267; 210, 268; 211, 269; 212, 270; 212, 271; 213, 272; 213, 273; 214, 274; 214, 275; 215, 276; 215, 277; 216, 278; 216, 279; 217, 280; 217, 281; 219, 282; 219, 283; 221, 284; 221, 285; 222, 286; 223, 287; 223, 288; 224, 289; 224, 290; 226, 291; 228, 292; 230, 293; 231, 294; 231, 295; 232, 296; 232, 297; 233, 298; 233, 299; 243, 300; 244, 301; 244, 302; 244, 303; 245, 304; 245, 305; 248, 306; 248, 307; 250, 308; 252, 309; 252, 310; 254, 311; 254, 312; 255, 313; 255, 314; 257, 315; 257, 316; 258, 317; 258, 318; 260, 319; 260, 320; 261, 321; 261, 322; 264, 323; 264, 324; 265, 325; 265, 326; 266, 327; 266, 328; 267, 329; 267, 330; 268, 331; 268, 332; 269, 333; 269, 334; 270, 335; 270, 336; 288, 337; 288, 338; 290, 339; 290, 340; 291, 341; 292, 342; 293, 343; 299, 344; 299, 345; 300, 346; 300, 347; 300, 348; 306, 349; 306, 350; 307, 351; 308, 352; 308, 353; 308, 354; 311, 355; 311, 356; 314, 357; 314, 358; 315, 359; 315, 360; 318, 361; 318, 362; 319, 363; 319, 364; 322, 365; 322, 366; 328, 367; 332, 368; 335, 369; 335, 370; 338, 371; 340, 372; 340, 373; 341, 374; 341, 375; 342, 376; 342, 377; 343, 378; 343, 379; 350, 380; 351, 381; 351, 382; 351, 383; 357, 384; 357, 385; 358, 386; 361, 387; 361, 388; 362, 389; 365, 390; 365, 391; 366, 392; 367, 393; 367, 394; 369, 395; 369, 396; 371, 397; 375, 398; 375, 399; 377, 400; 377, 401; 379, 402; 379, 403; 380, 404; 380, 405; 380, 406; 386, 407; 386, 408; 389, 409; 389, 410; 392, 411; 392, 412; 399, 413; 399, 414; 401, 415; 401, 416; 403, 417; 403, 418; 408, 419; 410, 420; 412, 421; 419, 422; 419, 423; 420, 424; 420, 425; 421, 426; 421, 427; 422, 428; 422, 429; 424, 430; 424, 431; 426, 432; 426, 433 | def comparison_table(self, caption=None, label="tab:model_comp", hlines=True,
aic=True, bic=True, dic=True, sort="bic", descending=True): # pragma: no cover
"""
Return a LaTeX ready table of model comparisons.
Parameters
----------
caption : str, optional
The table caption to insert.
label : str, optional
The table label to insert.
hlines : bool, optional
Whether to insert hlines in the table or not.
aic : bool, optional
Whether to include a column for AICc or not.
bic : bool, optional
Whether to include a column for BIC or not.
dic : bool, optional
Whether to include a column for DIC or not.
sort : str, optional
How to sort the models. Should be one of "bic", "aic" or "dic".
descending : bool, optional
The sort order.
Returns
-------
str
A LaTeX table to be copied into your document.
"""
if sort == "bic":
assert bic, "You cannot sort by BIC if you turn it off"
if sort == "aic":
assert aic, "You cannot sort by AIC if you turn it off"
if sort == "dic":
assert dic, "You cannot sort by DIC if you turn it off"
if caption is None:
caption = ""
if label is None:
label = ""
base_string = get_latex_table_frame(caption, label)
end_text = " \\\\ \n"
num_cols = 1 + (1 if aic else 0) + (1 if bic else 0) + (1 if dic else 0)
column_text = "c" * num_cols
center_text = ""
hline_text = "\\hline\n"
if hlines:
center_text += hline_text
center_text += "\tModel" + (" & AIC" if aic else "") + (" & BIC " if bic else "") \
+ (" & DIC " if dic else "") + end_text
if hlines:
center_text += "\t" + hline_text
if aic:
aics = self.aic()
else:
aics = np.zeros(len(self.parent.chains))
if bic:
bics = self.bic()
else:
bics = np.zeros(len(self.parent.chains))
if dic:
dics = self.dic()
else:
dics = np.zeros(len(self.parent.chains))
if sort == "bic":
to_sort = bics
elif sort == "aic":
to_sort = aics
elif sort == "dic":
to_sort = dics
else:
raise ValueError("sort %s not recognised, must be bic, aic or dic" % sort)
good = [i for i, t in enumerate(to_sort) if t is not None]
names = [self.parent.chains[g].name for g in good]
aics = [aics[g] for g in good]
bics = [bics[g] for g in good]
dics = [dics[g] for g in good]
to_sort = {"bic": bics, "aic": aics, "dic": dics}[sort]
indexes = np.argsort(to_sort)
if descending:
indexes = indexes[::-1]
for i in indexes:
line = "\t" + names[i]
if aic:
line += " & %5.1f " % aics[i]
if bic:
line += " & %5.1f " % bics[i]
if dic:
line += " & %5.1f " % dics[i]
line += end_text
center_text += line
if hlines:
center_text += "\t" + hline_text
return base_string % (column_text, center_text) |
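The method relies on a get_latex_table_frame helper that is not shown in this excerpt. A minimal standalone sketch of the same row-assembly idea, with a trivial stand-in frame (latex_frame and the example numbers are assumptions, not part of the library):
def latex_frame(caption, label):
    # Simplified stand-in for get_latex_table_frame: leaves two %s slots
    # for the column spec and the table body, as comparison_table expects.
    return ("\\begin{table}\n\\centering\n\\caption{%s}\\label{%s}\n"
            "\\begin{tabular}{%%s}\n%%s\\end{tabular}\n\\end{table}" % (caption, label))

rows = [("model A", 10.2), ("model B", 12.7)]
body = "\\hline\n\tModel & BIC \\\\ \n\t\\hline\n"
for name, bic in sorted(rows, key=lambda r: r[1]):
    body += "\t%s & %5.1f \\\\ \n" % (name, bic)
body += "\t\\hline\n"
print(latex_frame("Model comparison", "tab:model_comp") % ("cc", body))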
0, module; 1, function_definition; 2, function_name:_initDevClasses; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, if_statement; 13, for_statement; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, for_statement; 18, comment:"""Sort block devices into lists depending on device class and
initialize device type map and partition map."""; 19, assignment; 20, assignment; 21, assignment; 22, assignment; 23, assignment; 24, comparison_operator:self._mapMajorDevclass is None; 25, block; 26, identifier:dev; 27, attribute; 28, block; 29, call; 30, call; 31, assignment; 32, identifier:partdev; 33, identifier:otherdevs; 34, block; 35, attribute; 36, dictionary; 37, attribute; 38, dictionary; 39, attribute; 40, dictionary; 41, identifier:basedevs; 42, list; 43, identifier:otherdevs; 44, list; 45, attribute; 46, None; 47, expression_statement; 48, identifier:self; 49, identifier:_diskStats; 50, expression_statement; 51, expression_statement; 52, if_statement; 53, attribute; 54, argument_list; 55, attribute; 56, argument_list; 57, identifier:idx; 58, integer:0; 59, while_statement; 60, for_statement; 61, identifier:self; 62, identifier:_devClassTree; 63, identifier:self; 64, identifier:_partitionTree; 65, identifier:self; 66, identifier:_mapDevType; 67, identifier:self; 68, identifier:_mapMajorDevclass; 69, call; 70, assignment; 71, assignment; 72, comparison_operator:devclass is not None; 73, block; 74, identifier:basedevs; 75, identifier:sort; 76, keyword_argument; 77, keyword_argument; 78, identifier:otherdevs; 79, identifier:sort; 80, keyword_argument; 81, keyword_argument; 82, comparison_operator:len(basedevs[idx]) > partdev; 83, block; 84, identifier:dev; 85, subscript; 86, block; 87, attribute; 88, argument_list; 89, identifier:stats; 90, subscript; 91, identifier:devclass; 92, call; 93, identifier:devclass; 94, None; 95, expression_statement; 96, if_statement; 97, identifier:key; 98, identifier:len; 99, identifier:reverse; 100, True; 101, identifier:key; 102, identifier:len; 103, identifier:reverse; 104, True; 105, call; 106, identifier:partdev; 107, expression_statement; 108, identifier:basedevs; 109, slice; 110, if_statement; 111, identifier:self; 112, identifier:_initBlockMajorMap; 113, attribute; 114, identifier:dev; 115, attribute; 116, argument_list; 117, assignment; 118, call; 119, block; 120, else_clause; 121, identifier:len; 122, argument_list; 123, augmented_assignment; 124, identifier:idx; 125, call; 126, block; 127, identifier:self; 128, identifier:_diskStats; 129, attribute; 130, identifier:get; 131, subscript; 132, identifier:devdir; 133, call; 134, attribute; 135, argument_list; 136, if_statement; 137, expression_statement; 138, expression_statement; 139, expression_statement; 140, block; 141, subscript; 142, identifier:idx; 143, integer:1; 144, attribute; 145, argument_list; 146, if_statement; 147, expression_statement; 148, expression_statement; 149, identifier:self; 150, identifier:_mapMajorDevclass; 151, identifier:stats; 152, string; 153, attribute; 154, argument_list; 155, attribute; 156, identifier:isdir; 157, identifier:devdir; 158, not_operator; 159, block; 160, call; 161, assignment; 162, call; 163, expression_statement; 164, identifier:basedevs; 165, identifier:idx; 166, identifier:re; 167, identifier:match; 168, binary_operator:"%s(\d+|p\d+)$" % dev; 169, identifier:partdev; 170, not_operator; 171, block; 172, call; 173, assignment; 174, string_content:major; 175, attribute; 176, identifier:join; 177, identifier:sysfsBlockdevDir; 178, identifier:dev; 179, identifier:os; 180, identifier:path; 181, call; 182, expression_statement; 183, attribute; 184, argument_list; 185, subscript; 186, identifier:devclass; 187, attribute; 188, argument_list; 189, call; 190, string:"%s(\d+|p\d+)$"; 191, identifier:dev; 192, call; 193, expression_statement; 194, 
attribute; 195, argument_list; 196, subscript; 197, string; 198, identifier:os; 199, identifier:path; 200, attribute; 201, argument_list; 202, assignment; 203, subscript; 204, identifier:append; 205, identifier:dev; 206, attribute; 207, identifier:dev; 208, identifier:basedevs; 209, identifier:append; 210, identifier:dev; 211, attribute; 212, argument_list; 213, attribute; 214, argument_list; 215, assignment; 216, subscript; 217, identifier:append; 218, identifier:partdev; 219, attribute; 220, identifier:partdev; 221, string_content:part; 222, attribute; 223, identifier:has_key; 224, identifier:devclass; 225, subscript; 226, list; 227, attribute; 228, identifier:devclass; 229, identifier:self; 230, identifier:_mapDevType; 231, identifier:otherdevs; 232, identifier:append; 233, identifier:dev; 234, attribute; 235, identifier:has_key; 236, identifier:dev; 237, subscript; 238, list; 239, attribute; 240, identifier:dev; 241, identifier:self; 242, identifier:_mapDevType; 243, identifier:self; 244, identifier:_devClassTree; 245, attribute; 246, identifier:devclass; 247, identifier:self; 248, identifier:_devClassTree; 249, identifier:self; 250, identifier:_partitionTree; 251, attribute; 252, identifier:dev; 253, identifier:self; 254, identifier:_partitionTree; 255, identifier:self; 256, identifier:_devClassTree; 257, identifier:self; 258, identifier:_partitionTree | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 6, 18; 7, 19; 8, 20; 9, 21; 10, 22; 11, 23; 12, 24; 12, 25; 13, 26; 13, 27; 13, 28; 14, 29; 15, 30; 16, 31; 17, 32; 17, 33; 17, 34; 19, 35; 19, 36; 20, 37; 20, 38; 21, 39; 21, 40; 22, 41; 22, 42; 23, 43; 23, 44; 24, 45; 24, 46; 25, 47; 27, 48; 27, 49; 28, 50; 28, 51; 28, 52; 29, 53; 29, 54; 30, 55; 30, 56; 31, 57; 31, 58; 34, 59; 34, 60; 35, 61; 35, 62; 37, 63; 37, 64; 39, 65; 39, 66; 45, 67; 45, 68; 47, 69; 50, 70; 51, 71; 52, 72; 52, 73; 53, 74; 53, 75; 54, 76; 54, 77; 55, 78; 55, 79; 56, 80; 56, 81; 59, 82; 59, 83; 60, 84; 60, 85; 60, 86; 69, 87; 69, 88; 70, 89; 70, 90; 71, 91; 71, 92; 72, 93; 72, 94; 73, 95; 73, 96; 76, 97; 76, 98; 77, 99; 77, 100; 80, 101; 80, 102; 81, 103; 81, 104; 82, 105; 82, 106; 83, 107; 85, 108; 85, 109; 86, 110; 87, 111; 87, 112; 90, 113; 90, 114; 92, 115; 92, 116; 95, 117; 96, 118; 96, 119; 96, 120; 105, 121; 105, 122; 107, 123; 109, 124; 110, 125; 110, 126; 113, 127; 113, 128; 115, 129; 115, 130; 116, 131; 117, 132; 117, 133; 118, 134; 118, 135; 119, 136; 119, 137; 119, 138; 119, 139; 120, 140; 122, 141; 123, 142; 123, 143; 125, 144; 125, 145; 126, 146; 126, 147; 126, 148; 129, 149; 129, 150; 131, 151; 131, 152; 133, 153; 133, 154; 134, 155; 134, 156; 135, 157; 136, 158; 136, 159; 137, 160; 138, 161; 139, 162; 140, 163; 141, 164; 141, 165; 144, 166; 144, 167; 145, 168; 145, 169; 146, 170; 146, 171; 147, 172; 148, 173; 152, 174; 153, 175; 153, 176; 154, 177; 154, 178; 155, 179; 155, 180; 158, 181; 159, 182; 160, 183; 160, 184; 161, 185; 161, 186; 162, 187; 162, 188; 163, 189; 168, 190; 168, 191; 170, 192; 171, 193; 172, 194; 172, 195; 173, 196; 173, 197; 175, 198; 175, 199; 181, 200; 181, 201; 182, 202; 183, 203; 183, 204; 184, 205; 185, 206; 185, 207; 187, 208; 187, 209; 188, 210; 189, 211; 189, 212; 192, 213; 192, 214; 193, 215; 194, 216; 194, 217; 195, 218; 196, 219; 196, 220; 197, 221; 200, 222; 200, 223; 201, 224; 202, 225; 202, 226; 203, 227; 203, 228; 206, 229; 206, 230; 211, 231; 211, 232; 212, 233; 213, 234; 213, 235; 214, 236; 215, 237; 215, 238; 216, 239; 216, 240; 219, 241; 219, 242; 
222, 243; 222, 244; 225, 245; 225, 246; 227, 247; 227, 248; 234, 249; 234, 250; 237, 251; 237, 252; 239, 253; 239, 254; 245, 255; 245, 256; 251, 257; 251, 258 | def _initDevClasses(self):
"""Sort block devices into lists depending on device class and
initialize device type map and partition map."""
self._devClassTree = {}
self._partitionTree = {}
self._mapDevType = {}
basedevs = []
otherdevs = []
if self._mapMajorDevclass is None:
self._initBlockMajorMap()
for dev in self._diskStats:
stats = self._diskStats[dev]
devclass = self._mapMajorDevclass.get(stats['major'])
if devclass is not None:
devdir = os.path.join(sysfsBlockdevDir, dev)
if os.path.isdir(devdir):
if devclass not in self._devClassTree:
self._devClassTree[devclass] = []
self._devClassTree[devclass].append(dev)
self._mapDevType[dev] = devclass
basedevs.append(dev)
else:
otherdevs.append(dev)
basedevs.sort(key=len, reverse=True)
otherdevs.sort(key=len, reverse=True)
idx = 0
for partdev in otherdevs:
while len(basedevs[idx]) > len(partdev):
idx += 1
for dev in basedevs[idx:]:
if re.match(r"%s(\d+|p\d+)$" % dev, partdev):
if dev not in self._partitionTree:
self._partitionTree[dev] = []
self._partitionTree[dev].append(partdev)
self._mapDevType[partdev] = 'part' |
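The partition matching above depends on base device names being sorted by length and on the trailing-digit regular expression. A small standalone sketch of that matching step, with made-up device names:
import re

basedevs = sorted(['sda', 'sdb', 'nvme0n1'], key=len, reverse=True)
partitions = ['sda1', 'sda2', 'nvme0n1p1']

partition_tree = {}
for part in partitions:
    for dev in basedevs:
        if re.match(r"%s(\d+|p\d+)$" % re.escape(dev), part):
            partition_tree.setdefault(dev, []).append(part)
print(partition_tree)  # {'sda': ['sda1', 'sda2'], 'nvme0n1': ['nvme0n1p1']}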
0, module; 1, function_definition; 2, function_name:zdiffstore; 3, parameters; 4, block; 5, identifier:self; 6, identifier:dest; 7, identifier:keys; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, identifier:withscores; 14, False; 15, string; 16, assignment; 17, assignment; 18, call; 19, string_content:Compute the difference of multiple sorted.
The difference of sets specified by ``keys`` into a new sorted set
in ``dest``.; 20, identifier:keys; 21, binary_operator:(dest,) + tuple(keys); 22, identifier:wscores; 23, conditional_expression:'withscores' if withscores else ''; 24, attribute; 25, argument_list; 26, tuple; 27, call; 28, string; 29, identifier:withscores; 30, string; 31, identifier:self; 32, identifier:execute_script; 33, string; 34, identifier:keys; 35, identifier:wscores; 36, keyword_argument; 37, identifier:dest; 38, identifier:tuple; 39, argument_list; 40, string_content:withscores; 41, string_content:zdiffstore; 42, identifier:withscores; 43, identifier:withscores; 44, identifier:keys | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 8, 13; 8, 14; 9, 15; 10, 16; 11, 17; 12, 18; 15, 19; 16, 20; 16, 21; 17, 22; 17, 23; 18, 24; 18, 25; 21, 26; 21, 27; 23, 28; 23, 29; 23, 30; 24, 31; 24, 32; 25, 33; 25, 34; 25, 35; 25, 36; 26, 37; 27, 38; 27, 39; 28, 40; 33, 41; 36, 42; 36, 43; 39, 44 | def zdiffstore(self, dest, keys, withscores=False):
'''Compute the difference of multiple sorted sets.
Store the difference of the sorted sets specified by ``keys`` in a
new sorted set at ``dest``.
'''
keys = (dest,) + tuple(keys)
wscores = 'withscores' if withscores else ''
return self.execute_script('zdiffstore', keys, wscores,
withscores=withscores) |
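The Lua script registered under 'zdiffstore' is not part of this excerpt; the sketch below is only a hedged pure-Python illustration of the intended semantics (members of the first sorted set that appear in none of the others, keeping their scores) and returns the result instead of storing it:
def zdiff(first, *others, withscores=False):
    # first and others are {member: score} dicts standing in for sorted sets.
    members = {m: s for m, s in first.items()
               if not any(m in other for other in others)}
    ordered = sorted(members, key=lambda m: (members[m], m))
    return [(m, members[m]) for m in ordered] if withscores else ordered

print(zdiff({'a': 1, 'b': 2, 'c': 3}, {'b': 0}))                   # ['a', 'c']
print(zdiff({'a': 1, 'b': 2, 'c': 3}, {'b': 0}, withscores=True))  # [('a', 1), ('c', 3)]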
0, module; 1, function_definition; 2, function_name:places_within_radius; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, dictionary_splat_pattern; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, comment:# Make the query; 18, if_statement; 19, comment:# Assemble the result; 20, expression_statement; 21, for_statement; 22, return_statement; 23, identifier:place; 24, None; 25, identifier:latitude; 26, None; 27, identifier:longitude; 28, None; 29, identifier:radius; 30, integer:0; 31, identifier:kwargs; 32, comment:"""
Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``b'DESC'``.
"""; 33, assignment; 34, assignment; 35, assignment; 36, call; 37, assignment; 38, comparison_operator:place is not None; 39, block; 40, elif_clause; 41, else_clause; 42, assignment; 43, identifier:item; 44, identifier:response; 45, block; 46, identifier:ret; 47, subscript; 48, True; 49, subscript; 50, True; 51, subscript; 52, False; 53, attribute; 54, argument_list; 55, identifier:unit; 56, call; 57, identifier:place; 58, None; 59, expression_statement; 60, boolean_operator; 61, block; 62, block; 63, identifier:ret; 64, list; 65, expression_statement; 66, identifier:kwargs; 67, string; 68, identifier:kwargs; 69, string; 70, identifier:kwargs; 71, string; 72, identifier:kwargs; 73, identifier:setdefault; 74, string; 75, string; 76, attribute; 77, argument_list; 78, assignment; 79, parenthesized_expression; 80, parenthesized_expression; 81, expression_statement; 82, raise_statement; 83, call; 84, string_content:withdist; 85, string_content:withcoord; 86, string_content:withhash; 87, string_content:sort; 88, string_content:ASC; 89, identifier:kwargs; 90, identifier:setdefault; 91, string; 92, string; 93, identifier:response; 94, call; 95, comparison_operator:latitude is not None; 96, comparison_operator:longitude is not None; 97, assignment; 98, call; 99, attribute; 100, argument_list; 101, string_content:unit; 102, string_content:km; 103, attribute; 104, argument_list; 105, identifier:latitude; 106, None; 107, identifier:longitude; 108, None; 109, identifier:response; 110, call; 111, identifier:ValueError; 112, argument_list; 113, identifier:ret; 114, identifier:append; 115, dictionary; 116, attribute; 117, identifier:georadiusbymember; 118, attribute; 119, call; 120, identifier:radius; 121, dictionary_splat; 122, attribute; 123, argument_list; 124, string; 125, pair; 126, pair; 127, pair; 128, pair; 129, pair; 130, identifier:self; 131, identifier:redis; 132, identifier:self; 133, identifier:key; 134, attribute; 135, argument_list; 136, identifier:kwargs; 137, attribute; 138, identifier:georadius; 139, attribute; 140, identifier:longitude; 141, identifier:latitude; 142, identifier:radius; 143, dictionary_splat; 144, string_content:Must specify place, or both latitude and longitude; 145, string; 146, call; 147, string; 148, subscript; 149, string; 150, identifier:unit; 151, string; 152, subscript; 153, string; 154, subscript; 155, identifier:self; 156, identifier:_pickle; 157, identifier:place; 158, identifier:self; 159, identifier:redis; 160, identifier:self; 161, identifier:key; 162, identifier:kwargs; 163, string_content:place; 164, attribute; 165, argument_list; 166, string_content:distance; 167, identifier:item; 168, integer:1; 169, string_content:unit; 170, string_content:latitude; 171, subscript; 172, integer:1; 173, string_content:longitude; 174, subscript; 175, integer:0; 176, identifier:self; 177, identifier:_unpickle; 178, subscript; 179, identifier:item; 180, integer:2; 181, identifier:item; 182, integer:2; 183, identifier:item; 184, integer:0 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 6, 23; 6, 24; 7, 25; 7, 26; 8, 27; 8, 28; 9, 29; 9, 30; 10, 31; 11, 32; 12, 33; 13, 34; 14, 35; 15, 36; 16, 37; 18, 38; 18, 39; 18, 40; 18, 41; 20, 42; 21, 43; 21, 44; 21, 45; 22, 46; 33, 47; 33, 48; 34, 49; 34, 50; 35, 51; 35, 52; 36, 53; 36, 54; 37, 55; 37, 56; 38, 57; 38, 58; 39, 59; 40, 60; 40, 61; 41, 62; 42, 63; 42, 64; 45, 65; 47, 66; 47, 67; 49, 68; 49, 69; 51, 70; 51, 71; 53, 72; 53, 
73; 54, 74; 54, 75; 56, 76; 56, 77; 59, 78; 60, 79; 60, 80; 61, 81; 62, 82; 65, 83; 67, 84; 69, 85; 71, 86; 74, 87; 75, 88; 76, 89; 76, 90; 77, 91; 77, 92; 78, 93; 78, 94; 79, 95; 80, 96; 81, 97; 82, 98; 83, 99; 83, 100; 91, 101; 92, 102; 94, 103; 94, 104; 95, 105; 95, 106; 96, 107; 96, 108; 97, 109; 97, 110; 98, 111; 98, 112; 99, 113; 99, 114; 100, 115; 103, 116; 103, 117; 104, 118; 104, 119; 104, 120; 104, 121; 110, 122; 110, 123; 112, 124; 115, 125; 115, 126; 115, 127; 115, 128; 115, 129; 116, 130; 116, 131; 118, 132; 118, 133; 119, 134; 119, 135; 121, 136; 122, 137; 122, 138; 123, 139; 123, 140; 123, 141; 123, 142; 123, 143; 124, 144; 125, 145; 125, 146; 126, 147; 126, 148; 127, 149; 127, 150; 128, 151; 128, 152; 129, 153; 129, 154; 134, 155; 134, 156; 135, 157; 137, 158; 137, 159; 139, 160; 139, 161; 143, 162; 145, 163; 146, 164; 146, 165; 147, 166; 148, 167; 148, 168; 149, 169; 151, 170; 152, 171; 152, 172; 153, 173; 154, 174; 154, 175; 164, 176; 164, 177; 165, 178; 171, 179; 171, 180; 174, 181; 174, 182; 178, 183; 178, 184 | def places_within_radius(
self, place=None, latitude=None, longitude=None, radius=0, **kwargs
):
"""
Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``b'DESC'``.
"""
kwargs['withdist'] = True
kwargs['withcoord'] = True
kwargs['withhash'] = False
kwargs.setdefault('sort', 'ASC')
unit = kwargs.setdefault('unit', 'km')
# Make the query
if place is not None:
response = self.redis.georadiusbymember(
self.key, self._pickle(place), radius, **kwargs
)
elif (latitude is not None) and (longitude is not None):
response = self.redis.georadius(
self.key, longitude, latitude, radius, **kwargs
)
else:
raise ValueError(
'Must specify place, or both latitude and longitude'
)
# Assemble the result
ret = []
for item in response:
ret.append(
{
'place': self._unpickle(item[0]),
'distance': item[1],
'unit': unit,
'latitude': item[2][1],
'longitude': item[2][0],
}
)
return ret |
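For readers without Redis at hand, here is a hedged standalone sketch of the same 'places within a radius' query using the haversine formula; all names and coordinates below are illustrative and not part of the library:
import math

def haversine_km(lat1, lon1, lat2, lon2):
    # Great-circle distance between two (latitude, longitude) points in km.
    r = 6371.0
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp = math.radians(lat2 - lat1)
    dl = math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

places = {'bakery': (40.689, -74.044), 'museum': (48.858, 2.294)}
center, radius_km = (40.7, -74.0), 50
hits = sorted(
    ({'place': name, 'distance': haversine_km(*center, *coords), 'unit': 'km'}
     for name, coords in places.items()
     if haversine_km(*center, *coords) <= radius_km),
    key=lambda d: d['distance'],
)
print(hits)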
0, module; 1, function_definition; 2, function_name:naturalize_person; 3, parameters; 4, block; 5, identifier:self; 6, identifier:string; 7, expression_statement; 8, expression_statement; 9, comment:# Add lowercase versions:; 10, expression_statement; 11, comment:# If a name has a capitalised particle in we use that to sort.; 12, comment:# So 'Le Carre, John' but 'Carre, John le'.; 13, expression_statement; 14, expression_statement; 15, comment:# Smith; 16, expression_statement; 17, comment:# Fred James; 18, expression_statement; 19, comment:# Jr; 20, expression_statement; 21, expression_statement; 22, if_statement; 23, if_statement; 24, if_statement; 25, comment:# In case this name has any numbers in it.; 26, expression_statement; 27, return_statement; 28, comment:"""
Attempt to make a version of the string that has the surname, if any,
at the start.
'John, Brown' to 'Brown, John'
'Sir John Brown Jr' to 'Brown, Sir John Jr'
'Prince' to 'Prince'
string -- The string to change.
"""; 29, assignment; 30, assignment; 31, assignment; 32, assignment; 33, assignment; 34, assignment; 35, assignment; 36, assignment; 37, comparison_operator:parts[-1] in suffixes; 38, comment:# Remove suffixes entirely, as we'll add them back on the end.; 39, block; 40, comparison_operator:len(parts) > 1; 41, block; 42, identifier:suffix; 43, comment:# Add it back on.; 44, block; 45, assignment; 46, identifier:sort_string; 47, identifier:suffixes; 48, list; 49, identifier:suffixes; 50, binary_operator:suffixes + [s.lower() for s in suffixes]; 51, identifier:particles; 52, list; 53, identifier:surname; 54, string; 55, identifier:names; 56, string; 57, identifier:suffix; 58, string; 59, identifier:sort_string; 60, identifier:string; 61, identifier:parts; 62, call; 63, subscript; 64, identifier:suffixes; 65, expression_statement; 66, expression_statement; 67, comment:# Remove suffix from parts; 68, expression_statement; 69, call; 70, integer:1; 71, if_statement; 72, comment:# From 'David Foster Wallace' to 'Wallace, David Foster':; 73, expression_statement; 74, expression_statement; 75, identifier:sort_string; 76, call; 77, string; 78, string; 79, string; 80, string; 81, string; 82, string; 83, string; 84, string; 85, string; 86, identifier:suffixes; 87, list_comprehension; 88, string; 89, string; 90, string; 91, string; 92, string; 93, string; 94, attribute; 95, argument_list; 96, identifier:parts; 97, unary_operator; 98, assignment; 99, assignment; 100, assignment; 101, identifier:len; 102, argument_list; 103, comparison_operator:parts[-2] in particles; 104, comment:# From ['Alan', 'Barry', 'Le', 'Carré']; 105, comment:# to ['Alan', 'Barry', 'Le Carré']:; 106, block; 107, assignment; 108, assignment; 109, attribute; 110, argument_list; 111, string_content:Jr; 112, string_content:Jr.; 113, string_content:Sr; 114, string_content:Sr.; 115, string_content:I; 116, string_content:II; 117, string_content:III; 118, string_content:IV; 119, string_content:V; 120, call; 121, for_in_clause; 122, string_content:Le; 123, string_content:La; 124, string_content:Von; 125, string_content:Van; 126, string_content:Du; 127, string_content:De; 128, identifier:string; 129, identifier:split; 130, string; 131, integer:1; 132, identifier:suffix; 133, subscript; 134, identifier:parts; 135, subscript; 136, identifier:sort_string; 137, call; 138, identifier:parts; 139, subscript; 140, identifier:particles; 141, expression_statement; 142, identifier:sort_string; 143, call; 144, identifier:sort_string; 145, call; 146, identifier:self; 147, identifier:_naturalize_numbers; 148, identifier:sort_string; 149, attribute; 150, argument_list; 151, identifier:s; 152, identifier:suffixes; 153, string_content:; 154, identifier:parts; 155, unary_operator; 156, identifier:parts; 157, slice; 158, attribute; 159, argument_list; 160, identifier:parts; 161, unary_operator; 162, assignment; 163, attribute; 164, argument_list; 165, attribute; 166, argument_list; 167, identifier:s; 168, identifier:lower; 169, integer:1; 170, integer:0; 171, unary_operator; 172, string; 173, identifier:join; 174, identifier:parts; 175, integer:2; 176, identifier:parts; 177, binary_operator:parts[0:-2] + [ ' '.join(parts[-2:]) ]; 178, string; 179, identifier:format; 180, subscript; 181, call; 182, string; 183, identifier:format; 184, identifier:sort_string; 185, identifier:suffix; 186, integer:1; 187, string_content:; 188, subscript; 189, list; 190, string_content:{}, {}; 191, identifier:parts; 192, unary_operator; 193, attribute; 194, argument_list; 195, 
string_content:{} {}; 196, identifier:parts; 197, slice; 198, call; 199, integer:1; 200, string; 201, identifier:join; 202, subscript; 203, integer:0; 204, unary_operator; 205, attribute; 206, argument_list; 207, string_content:; 208, identifier:parts; 209, slice; 210, integer:2; 211, string; 212, identifier:join; 213, subscript; 214, unary_operator; 215, string_content:; 216, identifier:parts; 217, slice; 218, integer:1; 219, unary_operator; 220, integer:2 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 7, 28; 8, 29; 10, 30; 13, 31; 14, 32; 16, 33; 18, 34; 20, 35; 21, 36; 22, 37; 22, 38; 22, 39; 23, 40; 23, 41; 24, 42; 24, 43; 24, 44; 26, 45; 27, 46; 29, 47; 29, 48; 30, 49; 30, 50; 31, 51; 31, 52; 32, 53; 32, 54; 33, 55; 33, 56; 34, 57; 34, 58; 35, 59; 35, 60; 36, 61; 36, 62; 37, 63; 37, 64; 39, 65; 39, 66; 39, 67; 39, 68; 40, 69; 40, 70; 41, 71; 41, 72; 41, 73; 44, 74; 45, 75; 45, 76; 48, 77; 48, 78; 48, 79; 48, 80; 48, 81; 48, 82; 48, 83; 48, 84; 48, 85; 50, 86; 50, 87; 52, 88; 52, 89; 52, 90; 52, 91; 52, 92; 52, 93; 62, 94; 62, 95; 63, 96; 63, 97; 65, 98; 66, 99; 68, 100; 69, 101; 69, 102; 71, 103; 71, 104; 71, 105; 71, 106; 73, 107; 74, 108; 76, 109; 76, 110; 77, 111; 78, 112; 79, 113; 80, 114; 81, 115; 82, 116; 83, 117; 84, 118; 85, 119; 87, 120; 87, 121; 88, 122; 89, 123; 90, 124; 91, 125; 92, 126; 93, 127; 94, 128; 94, 129; 95, 130; 97, 131; 98, 132; 98, 133; 99, 134; 99, 135; 100, 136; 100, 137; 102, 138; 103, 139; 103, 140; 106, 141; 107, 142; 107, 143; 108, 144; 108, 145; 109, 146; 109, 147; 110, 148; 120, 149; 120, 150; 121, 151; 121, 152; 130, 153; 133, 154; 133, 155; 135, 156; 135, 157; 137, 158; 137, 159; 139, 160; 139, 161; 141, 162; 143, 163; 143, 164; 145, 165; 145, 166; 149, 167; 149, 168; 155, 169; 157, 170; 157, 171; 158, 172; 158, 173; 159, 174; 161, 175; 162, 176; 162, 177; 163, 178; 163, 179; 164, 180; 164, 181; 165, 182; 165, 183; 166, 184; 166, 185; 171, 186; 172, 187; 177, 188; 177, 189; 178, 190; 180, 191; 180, 192; 181, 193; 181, 194; 182, 195; 188, 196; 188, 197; 189, 198; 192, 199; 193, 200; 193, 201; 194, 202; 197, 203; 197, 204; 198, 205; 198, 206; 200, 207; 202, 208; 202, 209; 204, 210; 205, 211; 205, 212; 206, 213; 209, 214; 211, 215; 213, 216; 213, 217; 214, 218; 217, 219; 219, 220 | def naturalize_person(self, string):
"""
Attempt to make a version of the string that has the surname, if any,
at the start.
'John, Brown' to 'Brown, John'
'Sir John Brown Jr' to 'Brown, Sir John Jr'
'Prince' to 'Prince'
string -- The string to change.
"""
suffixes = [
'Jr', 'Jr.', 'Sr', 'Sr.',
'I', 'II', 'III', 'IV', 'V',
]
# Add lowercase versions:
suffixes = suffixes + [s.lower() for s in suffixes]
# If a name has a capitalised particle in we use that to sort.
# So 'Le Carre, John' but 'Carre, John le'.
particles = [
'Le', 'La',
'Von', 'Van',
'Du', 'De',
]
surname = '' # Smith
names = '' # Fred James
suffix = '' # Jr
sort_string = string
parts = string.split(' ')
if parts[-1] in suffixes:
# Remove suffixes entirely, as we'll add them back on the end.
suffix = parts[-1]
parts = parts[0:-1] # Remove suffix from parts
sort_string = ' '.join(parts)
if len(parts) > 1:
if parts[-2] in particles:
# From ['Alan', 'Barry', 'Le', 'Carré']
# to ['Alan', 'Barry', 'Le Carré']:
parts = parts[0:-2] + [ ' '.join(parts[-2:]) ]
# From 'David Foster Wallace' to 'Wallace, David Foster':
sort_string = '{}, {}'.format(parts[-1], ' '.join(parts[:-1]))
if suffix:
# Add it back on.
sort_string = '{} {}'.format(sort_string, suffix)
# In case this name has any numbers in it.
sort_string = self._naturalize_numbers(sort_string)
return sort_string |
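The surname-first rule above can be tried out with a small standalone sketch (an illustration only: person_sort_key and the sample names are made up, and the real method additionally lower-cases the suffixes and runs the result through self._naturalize_numbers()):

SUFFIXES = {'Jr', 'Jr.', 'Sr', 'Sr.', 'I', 'II', 'III', 'IV', 'V'}
PARTICLES = {'Le', 'La', 'Von', 'Van', 'Du', 'De'}

def person_sort_key(name):
    # Strip a trailing suffix, keep a capitalised particle glued to the
    # surname, then move the surname to the front.
    parts = name.split(' ')
    suffix = ''
    if parts[-1] in SUFFIXES:
        suffix, parts = parts[-1], parts[:-1]
    if len(parts) > 1:
        if len(parts) > 2 and parts[-2] in PARTICLES:
            parts = parts[:-2] + [' '.join(parts[-2:])]
        name = '{}, {}'.format(parts[-1], ' '.join(parts[:-1]))
    else:
        name = parts[0]
    return '{} {}'.format(name, suffix) if suffix else name

print(person_sort_key('David Foster Wallace'))  # Wallace, David Foster
print(person_sort_key('Alan Barry Le Carré'))   # Le Carré, Alan Barry
print(person_sort_key('Sir John Brown Jr'))     # Brown, Sir John Jr
print(person_sort_key('Prince'))                # Prince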
0, module; 1, function_definition; 2, function_name:get_countries; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, comment:"""
Returns a list of dicts, one per country that has at least one Venue
in it.
Each dict has 'code' and 'name' elements.
The list is sorted by the country 'name's.
"""; 12, assignment; 13, assignment; 14, identifier:c; 15, identifier:qs; 16, block; 17, call; 18, identifier:qs; 19, call; 20, identifier:countries; 21, list; 22, expression_statement; 23, identifier:sorted; 24, argument_list; 25, attribute; 26, argument_list; 27, call; 28, identifier:countries; 29, keyword_argument; 30, call; 31, line_continuation:\; 32, identifier:order_by; 33, string; 34, attribute; 35, argument_list; 36, identifier:key; 37, lambda; 38, attribute; 39, argument_list; 40, string_content:country; 41, identifier:countries; 42, identifier:append; 43, dictionary; 44, lambda_parameters; 45, subscript; 46, call; 47, line_continuation:\; 48, identifier:distinct; 49, pair; 50, pair; 51, identifier:k; 52, identifier:k; 53, string; 54, attribute; 55, argument_list; 56, string; 57, subscript; 58, string; 59, call; 60, string_content:name; 61, call; 62, line_continuation:\; 63, identifier:exclude; 64, keyword_argument; 65, string_content:code; 66, identifier:c; 67, string; 68, string_content:name; 69, attribute; 70, argument_list; 71, attribute; 72, argument_list; 73, identifier:country; 74, string; 75, string_content:country; 76, identifier:Venue; 77, identifier:get_country_name; 78, subscript; 79, attribute; 80, identifier:values; 81, string; 82, identifier:c; 83, string; 84, identifier:Venue; 85, identifier:objects; 86, string_content:country; 87, string_content:country | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 7, 12; 8, 13; 9, 14; 9, 15; 9, 16; 10, 17; 12, 18; 12, 19; 13, 20; 13, 21; 16, 22; 17, 23; 17, 24; 19, 25; 19, 26; 22, 27; 24, 28; 24, 29; 25, 30; 25, 31; 25, 32; 26, 33; 27, 34; 27, 35; 29, 36; 29, 37; 30, 38; 30, 39; 33, 40; 34, 41; 34, 42; 35, 43; 37, 44; 37, 45; 38, 46; 38, 47; 38, 48; 43, 49; 43, 50; 44, 51; 45, 52; 45, 53; 46, 54; 46, 55; 49, 56; 49, 57; 50, 58; 50, 59; 53, 60; 54, 61; 54, 62; 54, 63; 55, 64; 56, 65; 57, 66; 57, 67; 58, 68; 59, 69; 59, 70; 61, 71; 61, 72; 64, 73; 64, 74; 67, 75; 69, 76; 69, 77; 70, 78; 71, 79; 71, 80; 72, 81; 78, 82; 78, 83; 79, 84; 79, 85; 81, 86; 83, 87 | def get_countries(self):
"""
Returns a list of dicts, one per country that has at least one Venue
in it.
Each dict has 'code' and 'name' elements.
The list is sorted by the country 'name's.
"""
qs = Venue.objects.values('country') \
.exclude(country='') \
.distinct() \
.order_by('country')
countries = []
for c in qs:
countries.append({
'code': c['country'],
'name': Venue.get_country_name(c['country'])
})
return sorted(countries, key=lambda k: k['name']) |
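The shape of the returned list and its final ordering step can be shown without Django (the country codes and names below are invented):

countries = [
    {'code': 'GB', 'name': 'United Kingdom'},
    {'code': 'FR', 'name': 'France'},
    {'code': 'BE', 'name': 'Belgium'},
]
# Same sort key as above: order the dicts by their human-readable name.
print(sorted(countries, key=lambda k: k['name']))
# [{'code': 'BE', 'name': 'Belgium'}, {'code': 'FR', 'name': 'France'}, {'code': 'GB', 'name': 'United Kingdom'}]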
0, module; 1, function_definition; 2, function_name:fit; 3, parameters; 4, block; 5, identifier:self; 6, identifier:t; 7, identifier:y; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, return_statement; 14, identifier:dy; 15, integer:1; 16, identifier:presorted; 17, False; 18, comment:"""Fit the smoother
Parameters
----------
t : array_like
time locations of the points to smooth
y : array_like
y locations of the points to smooth
dy : array_like or float (default = 1)
Errors in the y values
presorted : bool (default = False)
If True, then t is assumed to be sorted.
Returns
-------
self : Smoother instance
"""; 19, assignment; 20, call; 21, identifier:self; 22, pattern_list; 23, call; 24, attribute; 25, argument_list; 26, attribute; 27, attribute; 28, attribute; 29, attribute; 30, argument_list; 31, identifier:self; 32, identifier:_fit; 33, attribute; 34, attribute; 35, attribute; 36, identifier:self; 37, identifier:t; 38, identifier:self; 39, identifier:y; 40, identifier:self; 41, identifier:dy; 42, identifier:self; 43, identifier:_validate_inputs; 44, identifier:t; 45, identifier:y; 46, identifier:dy; 47, identifier:presorted; 48, identifier:self; 49, identifier:t; 50, identifier:self; 51, identifier:y; 52, identifier:self; 53, identifier:dy | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 8, 14; 8, 15; 9, 16; 9, 17; 10, 18; 11, 19; 12, 20; 13, 21; 19, 22; 19, 23; 20, 24; 20, 25; 22, 26; 22, 27; 22, 28; 23, 29; 23, 30; 24, 31; 24, 32; 25, 33; 25, 34; 25, 35; 26, 36; 26, 37; 27, 38; 27, 39; 28, 40; 28, 41; 29, 42; 29, 43; 30, 44; 30, 45; 30, 46; 30, 47; 33, 48; 33, 49; 34, 50; 34, 51; 35, 52; 35, 53 | def fit(self, t, y, dy=1, presorted=False):
"""Fit the smoother
Parameters
----------
t : array_like
time locations of the points to smooth
y : array_like
y locations of the points to smooth
dy : array_like or float (default = 1)
Errors in the y values
presorted : bool (default = False)
If True, then t is assumed to be sorted.
Returns
-------
self : Smoother instance
"""
self.t, self.y, self.dy = self._validate_inputs(t, y, dy, presorted)
self._fit(self.t, self.y, self.dy)
return self |
0, module; 1, function_definition; 2, function_name:validate_inputs; 3, parameters; 4, block; 5, list_splat_pattern; 6, dictionary_splat_pattern; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, if_statement; 11, if_statement; 12, if_statement; 13, return_statement; 14, identifier:arrays; 15, identifier:kwargs; 16, comment:"""Validate input arrays
This checks that
- Arrays are mutually broadcastable
- Broadcasted arrays are one-dimensional
Optionally, arrays are sorted according to the ``sort_by`` argument.
Parameters
----------
*arrays : ndarrays
All non-keyword arguments are arrays which will be validated
sort_by : array
If specified, sort all inputs by the order given in this array.
"""; 17, assignment; 18, assignment; 19, identifier:kwargs; 20, block; 21, comparison_operator:arrays[0].ndim != 1; 22, block; 23, comparison_operator:sort_by is not None; 24, block; 25, identifier:arrays; 26, identifier:arrays; 27, call; 28, identifier:sort_by; 29, call; 30, raise_statement; 31, attribute; 32, integer:1; 33, raise_statement; 34, identifier:sort_by; 35, None; 36, expression_statement; 37, if_statement; 38, expression_statement; 39, attribute; 40, argument_list; 41, attribute; 42, argument_list; 43, call; 44, subscript; 45, identifier:ndim; 46, call; 47, assignment; 48, comparison_operator:isort.shape != arrays[0].shape; 49, block; 50, assignment; 51, identifier:np; 52, identifier:broadcast_arrays; 53, list_splat; 54, identifier:kwargs; 55, identifier:pop; 56, string; 57, None; 58, identifier:ValueError; 59, argument_list; 60, identifier:arrays; 61, integer:0; 62, identifier:ValueError; 63, argument_list; 64, identifier:isort; 65, call; 66, attribute; 67, attribute; 68, raise_statement; 69, identifier:arrays; 70, call; 71, identifier:arrays; 72, string_content:sort_by; 73, call; 74, string:"Input arrays should be one-dimensional."; 75, attribute; 76, argument_list; 77, identifier:isort; 78, identifier:shape; 79, subscript; 80, identifier:shape; 81, call; 82, identifier:tuple; 83, argument_list; 84, attribute; 85, argument_list; 86, identifier:np; 87, identifier:argsort; 88, identifier:sort_by; 89, identifier:arrays; 90, integer:0; 91, identifier:ValueError; 92, argument_list; 93, list_comprehension; 94, string:"unrecognized arguments: {0}"; 95, identifier:format; 96, call; 97, string:"sort shape must equal array shape."; 98, subscript; 99, for_in_clause; 100, attribute; 101, argument_list; 102, identifier:a; 103, identifier:isort; 104, identifier:a; 105, identifier:arrays; 106, identifier:kwargs; 107, identifier:keys | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 5, 14; 6, 15; 7, 16; 8, 17; 9, 18; 10, 19; 10, 20; 11, 21; 11, 22; 12, 23; 12, 24; 13, 25; 17, 26; 17, 27; 18, 28; 18, 29; 20, 30; 21, 31; 21, 32; 22, 33; 23, 34; 23, 35; 24, 36; 24, 37; 24, 38; 27, 39; 27, 40; 29, 41; 29, 42; 30, 43; 31, 44; 31, 45; 33, 46; 36, 47; 37, 48; 37, 49; 38, 50; 39, 51; 39, 52; 40, 53; 41, 54; 41, 55; 42, 56; 42, 57; 43, 58; 43, 59; 44, 60; 44, 61; 46, 62; 46, 63; 47, 64; 47, 65; 48, 66; 48, 67; 49, 68; 50, 69; 50, 70; 53, 71; 56, 72; 59, 73; 63, 74; 65, 75; 65, 76; 66, 77; 66, 78; 67, 79; 67, 80; 68, 81; 70, 82; 70, 83; 73, 84; 73, 85; 75, 86; 75, 87; 76, 88; 79, 89; 79, 90; 81, 91; 81, 92; 83, 93; 84, 94; 84, 95; 85, 96; 92, 97; 93, 98; 93, 99; 96, 100; 96, 101; 98, 102; 98, 103; 99, 104; 99, 105; 100, 106; 100, 107 | def validate_inputs(*arrays, **kwargs):
"""Validate input arrays
This checks that
- Arrays are mutually broadcastable
- Broadcasted arrays are one-dimensional
Optionally, arrays are sorted according to the ``sort_by`` argument.
Parameters
----------
*arrays : ndarrays
All non-keyword arguments are arrays which will be validated
sort_by : array
If specified, sort all inputs by the order given in this array.
"""
arrays = np.broadcast_arrays(*arrays)
sort_by = kwargs.pop('sort_by', None)
if kwargs:
raise ValueError("unrecognized arguments: {0}".format(kwargs.keys()))
if arrays[0].ndim != 1:
raise ValueError("Input arrays should be one-dimensional.")
if sort_by is not None:
isort = np.argsort(sort_by)
if isort.shape != arrays[0].shape:
raise ValueError("sort shape must equal array shape.")
arrays = tuple([a[isort] for a in arrays])
return arrays |
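A small usage sketch of the sort_by path (the sample values are invented; assumes validate_inputs and the numpy import its module relies on are in scope): the inputs are broadcast to a common 1-D shape, then every array is reordered by np.argsort(sort_by).

import numpy as np

t = np.array([3.0, 1.0, 2.0])
y = np.array([30.0, 10.0, 20.0])
dy = 1.0  # scalar error, broadcast against t and y

t_s, y_s, dy_s = validate_inputs(t, y, dy, sort_by=t)
print(t_s, y_s, dy_s)  # [1. 2. 3.] [10. 20. 30.] [1. 1. 1.]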
0, module; 1, function_definition; 2, function_name:multinterp; 3, parameters; 4, block; 5, identifier:x; 6, identifier:y; 7, identifier:xquery; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, assert_statement; 12, assert_statement; 13, assert_statement; 14, comment:# make sure xmin < xquery < xmax in all cases; 15, expression_statement; 16, if_statement; 17, identifier:slow; 18, False; 19, comment:"""Multiple linear interpolations
Parameters
----------
x : array_like, shape=(N,)
sorted array of x values
y : array_like, shape=(N, M)
array of y values corresponding to each x value
xquery : array_like, shape=(M,)
array of query values
slow : boolean, default=False
if True, use slow method (used mainly for unit testing)
Returns
-------
yquery : ndarray, shape=(M,)
The interpolated values corresponding to each x query.
"""; 20, assignment; 21, comparison_operator:x.ndim == 1; 22, comparison_operator:xquery.ndim == 1; 23, comparison_operator:y.shape == x.shape + xquery.shape; 24, assignment; 25, identifier:slow; 26, block; 27, elif_clause; 28, else_clause; 29, pattern_list; 30, call; 31, attribute; 32, integer:1; 33, attribute; 34, integer:1; 35, attribute; 36, binary_operator:x.shape + xquery.shape; 37, identifier:xquery; 38, call; 39, import_from_statement; 40, return_statement; 41, comparison_operator:len(x) == 3; 42, comment:# Most common case: use a faster approach; 43, block; 44, block; 45, identifier:x; 46, identifier:y; 47, identifier:xquery; 48, identifier:map; 49, argument_list; 50, identifier:x; 51, identifier:ndim; 52, identifier:xquery; 53, identifier:ndim; 54, identifier:y; 55, identifier:shape; 56, attribute; 57, attribute; 58, attribute; 59, argument_list; 60, dotted_name; 61, dotted_name; 62, call; 63, call; 64, integer:3; 65, expression_statement; 66, expression_statement; 67, return_statement; 68, expression_statement; 69, expression_statement; 70, return_statement; 71, attribute; 72, tuple; 73, identifier:x; 74, identifier:shape; 75, identifier:xquery; 76, identifier:shape; 77, identifier:np; 78, identifier:clip; 79, identifier:xquery; 80, call; 81, call; 82, identifier:scipy; 83, identifier:interpolate; 84, identifier:interp1d; 85, attribute; 86, argument_list; 87, identifier:len; 88, argument_list; 89, assignment; 90, assignment; 91, call; 92, assignment; 93, assignment; 94, binary_operator:y[i, j] + ((xquery - x[i]) *
(y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i])); 95, identifier:np; 96, identifier:asarray; 97, identifier:x; 98, identifier:y; 99, identifier:xquery; 100, attribute; 101, argument_list; 102, attribute; 103, argument_list; 104, identifier:np; 105, identifier:array; 106, list_comprehension; 107, identifier:x; 108, identifier:yq_lower; 109, binary_operator:y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0]); 110, identifier:yq_upper; 111, binary_operator:y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1]); 112, attribute; 113, argument_list; 114, identifier:i; 115, call; 116, identifier:j; 117, call; 118, subscript; 119, parenthesized_expression; 120, identifier:x; 121, identifier:min; 122, identifier:x; 123, identifier:max; 124, call; 125, for_in_clause; 126, subscript; 127, binary_operator:(xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0]); 128, subscript; 129, binary_operator:(xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1]); 130, identifier:np; 131, identifier:where; 132, comparison_operator:xquery < x[1]; 133, identifier:yq_lower; 134, identifier:yq_upper; 135, attribute; 136, argument_list; 137, attribute; 138, argument_list; 139, identifier:y; 140, identifier:i; 141, identifier:j; 142, binary_operator:(xquery - x[i]) *
(y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i]); 143, call; 144, argument_list; 145, pattern_list; 146, call; 147, identifier:y; 148, integer:0; 149, binary_operator:(xquery - x[0]) * (y[1] - y[0]); 150, parenthesized_expression; 151, identifier:y; 152, integer:1; 153, binary_operator:(xquery - x[1]) * (y[2] - y[1]); 154, parenthesized_expression; 155, identifier:xquery; 156, subscript; 157, identifier:np; 158, identifier:clip; 159, binary_operator:np.searchsorted(x, xquery, side='right') - 1; 160, integer:0; 161, binary_operator:len(x) - 2; 162, identifier:np; 163, identifier:arange; 164, call; 165, binary_operator:(xquery - x[i]) *
(y[i + 1, j] - y[i, j]); 166, parenthesized_expression; 167, identifier:interp1d; 168, argument_list; 169, identifier:xq; 170, identifier:xq; 171, identifier:y; 172, identifier:zip; 173, argument_list; 174, parenthesized_expression; 175, parenthesized_expression; 176, binary_operator:x[1] - x[0]; 177, parenthesized_expression; 178, parenthesized_expression; 179, binary_operator:x[2] - x[1]; 180, identifier:x; 181, integer:1; 182, call; 183, integer:1; 184, call; 185, integer:2; 186, identifier:len; 187, argument_list; 188, parenthesized_expression; 189, parenthesized_expression; 190, binary_operator:x[i + 1] - x[i]; 191, identifier:x; 192, identifier:y; 193, identifier:xquery; 194, attribute; 195, binary_operator:xquery - x[0]; 196, binary_operator:y[1] - y[0]; 197, subscript; 198, subscript; 199, binary_operator:xquery - x[1]; 200, binary_operator:y[2] - y[1]; 201, subscript; 202, subscript; 203, attribute; 204, argument_list; 205, identifier:len; 206, argument_list; 207, identifier:xquery; 208, binary_operator:xquery - x[i]; 209, binary_operator:y[i + 1, j] - y[i, j]; 210, subscript; 211, subscript; 212, identifier:y; 213, identifier:T; 214, identifier:xquery; 215, subscript; 216, subscript; 217, subscript; 218, identifier:x; 219, integer:1; 220, identifier:x; 221, integer:0; 222, identifier:xquery; 223, subscript; 224, subscript; 225, subscript; 226, identifier:x; 227, integer:2; 228, identifier:x; 229, integer:1; 230, identifier:np; 231, identifier:searchsorted; 232, identifier:x; 233, identifier:xquery; 234, keyword_argument; 235, identifier:x; 236, identifier:xquery; 237, subscript; 238, subscript; 239, subscript; 240, identifier:x; 241, binary_operator:i + 1; 242, identifier:x; 243, identifier:i; 244, identifier:x; 245, integer:0; 246, identifier:y; 247, integer:1; 248, identifier:y; 249, integer:0; 250, identifier:x; 251, integer:1; 252, identifier:y; 253, integer:2; 254, identifier:y; 255, integer:1; 256, identifier:side; 257, string; 258, identifier:x; 259, identifier:i; 260, identifier:y; 261, binary_operator:i + 1; 262, identifier:j; 263, identifier:y; 264, identifier:i; 265, identifier:j; 266, identifier:i; 267, integer:1; 268, string_content:right; 269, identifier:i; 270, integer:1 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 8, 17; 8, 18; 9, 19; 10, 20; 11, 21; 12, 22; 13, 23; 15, 24; 16, 25; 16, 26; 16, 27; 16, 28; 20, 29; 20, 30; 21, 31; 21, 32; 22, 33; 22, 34; 23, 35; 23, 36; 24, 37; 24, 38; 26, 39; 26, 40; 27, 41; 27, 42; 27, 43; 28, 44; 29, 45; 29, 46; 29, 47; 30, 48; 30, 49; 31, 50; 31, 51; 33, 52; 33, 53; 35, 54; 35, 55; 36, 56; 36, 57; 38, 58; 38, 59; 39, 60; 39, 61; 40, 62; 41, 63; 41, 64; 43, 65; 43, 66; 43, 67; 44, 68; 44, 69; 44, 70; 49, 71; 49, 72; 56, 73; 56, 74; 57, 75; 57, 76; 58, 77; 58, 78; 59, 79; 59, 80; 59, 81; 60, 82; 60, 83; 61, 84; 62, 85; 62, 86; 63, 87; 63, 88; 65, 89; 66, 90; 67, 91; 68, 92; 69, 93; 70, 94; 71, 95; 71, 96; 72, 97; 72, 98; 72, 99; 80, 100; 80, 101; 81, 102; 81, 103; 85, 104; 85, 105; 86, 106; 88, 107; 89, 108; 89, 109; 90, 110; 90, 111; 91, 112; 91, 113; 92, 114; 92, 115; 93, 116; 93, 117; 94, 118; 94, 119; 100, 120; 100, 121; 102, 122; 102, 123; 106, 124; 106, 125; 109, 126; 109, 127; 111, 128; 111, 129; 112, 130; 112, 131; 113, 132; 113, 133; 113, 134; 115, 135; 115, 136; 117, 137; 117, 138; 118, 139; 118, 140; 118, 141; 119, 142; 124, 143; 124, 144; 125, 145; 125, 146; 126, 147; 126, 148; 127, 149; 127, 150; 128, 151; 128, 152; 129, 153; 129, 154; 132, 155; 132, 156; 
135, 157; 135, 158; 136, 159; 136, 160; 136, 161; 137, 162; 137, 163; 138, 164; 142, 165; 142, 166; 143, 167; 143, 168; 144, 169; 145, 170; 145, 171; 146, 172; 146, 173; 149, 174; 149, 175; 150, 176; 153, 177; 153, 178; 154, 179; 156, 180; 156, 181; 159, 182; 159, 183; 161, 184; 161, 185; 164, 186; 164, 187; 165, 188; 165, 189; 166, 190; 168, 191; 168, 192; 173, 193; 173, 194; 174, 195; 175, 196; 176, 197; 176, 198; 177, 199; 178, 200; 179, 201; 179, 202; 182, 203; 182, 204; 184, 205; 184, 206; 187, 207; 188, 208; 189, 209; 190, 210; 190, 211; 194, 212; 194, 213; 195, 214; 195, 215; 196, 216; 196, 217; 197, 218; 197, 219; 198, 220; 198, 221; 199, 222; 199, 223; 200, 224; 200, 225; 201, 226; 201, 227; 202, 228; 202, 229; 203, 230; 203, 231; 204, 232; 204, 233; 204, 234; 206, 235; 208, 236; 208, 237; 209, 238; 209, 239; 210, 240; 210, 241; 211, 242; 211, 243; 215, 244; 215, 245; 216, 246; 216, 247; 217, 248; 217, 249; 223, 250; 223, 251; 224, 252; 224, 253; 225, 254; 225, 255; 234, 256; 234, 257; 237, 258; 237, 259; 238, 260; 238, 261; 238, 262; 239, 263; 239, 264; 239, 265; 241, 266; 241, 267; 257, 268; 261, 269; 261, 270 | def multinterp(x, y, xquery, slow=False):
"""Multiple linear interpolations
Parameters
----------
x : array_like, shape=(N,)
sorted array of x values
y : array_like, shape=(N, M)
array of y values corresponding to each x value
xquery : array_like, shape=(M,)
array of query values
slow : boolean, default=False
if True, use slow method (used mainly for unit testing)
Returns
-------
yquery : ndarray, shape=(M,)
The interpolated values corresponding to each x query.
"""
x, y, xquery = map(np.asarray, (x, y, xquery))
assert x.ndim == 1
assert xquery.ndim == 1
assert y.shape == x.shape + xquery.shape
# make sure xmin < xquery < xmax in all cases
xquery = np.clip(xquery, x.min(), x.max())
if slow:
from scipy.interpolate import interp1d
return np.array([interp1d(x, y)(xq) for xq, y in zip(xquery, y.T)])
elif len(x) == 3:
# Most common case: use a faster approach
yq_lower = y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
yq_upper = y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1])
return np.where(xquery < x[1], yq_lower, yq_upper)
else:
i = np.clip(np.searchsorted(x, xquery, side='right') - 1,
0, len(x) - 2)
j = np.arange(len(xquery))
return y[i, j] + ((xquery - x[i]) *
(y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i])) |
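A quick consistency check of the two code paths above (assumes multinterp is in scope and scipy is installed for the slow path; the sample data is invented):

import numpy as np

x = np.array([0.0, 1.0, 2.0])                    # shape (N,), here N == 3
xq = np.array([0.25, 0.5, 1.5, 1.9])             # shape (M,)
y = np.vstack([0 * xq, 0 * xq + 1, 0 * xq + 4])  # shape (N, M): x**2 sampled at the nodes

fast = multinterp(x, y, xq)             # the N == 3 fast branch
slow = multinterp(x, y, xq, slow=True)  # scipy.interpolate.interp1d reference
print(np.allclose(fast, slow))          # True
print(fast)                             # piecewise-linear values: 0.25, 0.5, 2.5, 3.7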
0, module; 1, function_definition; 2, function_name:check_valid_time_and_sort; 3, parameters; 4, block; 5, identifier:df; 6, identifier:timescol; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, if_statement; 12, identifier:days; 13, integer:5; 14, identifier:warning; 15, True; 16, comment:"""Check if the data contains reads created within the same `days` timeframe.
if not, print warning and only return part of the data which is within `days` days
Resetting the index twice to get also an "index" column for plotting the cum_yield_reads plot
"""; 17, assignment; 18, comparison_operator:timediff < days; 19, block; 20, else_clause; 21, identifier:timediff; 22, attribute; 23, identifier:timediff; 24, identifier:days; 25, return_statement; 26, block; 27, parenthesized_expression; 28, identifier:days; 29, call; 30, if_statement; 31, return_statement; 32, binary_operator:df[timescol].max() - df[timescol].min(); 33, attribute; 34, argument_list; 35, identifier:warning; 36, block; 37, call; 38, call; 39, call; 40, call; 41, identifier:reset_index; 42, expression_statement; 43, expression_statement; 44, expression_statement; 45, expression_statement; 46, attribute; 47, argument_list; 48, attribute; 49, argument_list; 50, attribute; 51, argument_list; 52, attribute; 53, argument_list; 54, call; 55, call; 56, call; 57, call; 58, call; 59, line_continuation:\; 60, identifier:reset_index; 61, subscript; 62, identifier:max; 63, subscript; 64, identifier:min; 65, call; 66, identifier:reset_index; 67, keyword_argument; 68, attribute; 69, argument_list; 70, attribute; 71, argument_list; 72, attribute; 73, argument_list; 74, attribute; 75, argument_list; 76, attribute; 77, argument_list; 78, identifier:df; 79, identifier:timescol; 80, identifier:df; 81, identifier:timescol; 82, attribute; 83, argument_list; 84, identifier:drop; 85, True; 86, attribute; 87, identifier:write; 88, call; 89, attribute; 90, identifier:write; 91, string:"Likely this indicates you are combining multiple runs.\n"; 92, attribute; 93, identifier:write; 94, call; 95, identifier:logging; 96, identifier:warning; 97, call; 98, call; 99, line_continuation:\; 100, identifier:reset_index; 101, keyword_argument; 102, identifier:df; 103, identifier:sort_values; 104, identifier:timescol; 105, identifier:sys; 106, identifier:stderr; 107, attribute; 108, argument_list; 109, identifier:sys; 110, identifier:stderr; 111, identifier:sys; 112, identifier:stderr; 113, attribute; 114, argument_list; 115, attribute; 116, argument_list; 117, attribute; 118, argument_list; 119, identifier:drop; 120, True; 121, string:"\nWarning: data generated is from more than {} days.\n"; 122, identifier:format; 123, call; 124, string:"Plots based on time are invalid and therefore truncated to first {} days.\n\n"; 125, identifier:format; 126, call; 127, string:"Time plots truncated to first {} days: invalid timespan: {} days"; 128, identifier:format; 129, call; 130, call; 131, subscript; 132, line_continuation:\; 133, identifier:sort_values; 134, identifier:timescol; 135, identifier:str; 136, argument_list; 137, identifier:str; 138, argument_list; 139, identifier:str; 140, argument_list; 141, identifier:str; 142, argument_list; 143, identifier:df; 144, comparison_operator:df[timescol] < timedelta(days=days); 145, identifier:days; 146, identifier:days; 147, identifier:days; 148, identifier:timediff; 149, subscript; 150, call; 151, identifier:df; 152, identifier:timescol; 153, identifier:timedelta; 154, argument_list; 155, keyword_argument; 156, identifier:days; 157, identifier:days | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 7, 12; 7, 13; 8, 14; 8, 15; 9, 16; 10, 17; 11, 18; 11, 19; 11, 20; 17, 21; 17, 22; 18, 23; 18, 24; 19, 25; 20, 26; 22, 27; 22, 28; 25, 29; 26, 30; 26, 31; 27, 32; 29, 33; 29, 34; 30, 35; 30, 36; 31, 37; 32, 38; 32, 39; 33, 40; 33, 41; 36, 42; 36, 43; 36, 44; 36, 45; 37, 46; 37, 47; 38, 48; 38, 49; 39, 50; 39, 51; 40, 52; 40, 53; 42, 54; 43, 55; 44, 56; 45, 57; 46, 58; 46, 59; 46, 60; 48, 61; 48, 62; 50, 63; 50, 64; 52, 65; 52, 66; 53, 67; 54, 68; 54, 69; 55, 70; 
55, 71; 56, 72; 56, 73; 57, 74; 57, 75; 58, 76; 58, 77; 61, 78; 61, 79; 63, 80; 63, 81; 65, 82; 65, 83; 67, 84; 67, 85; 68, 86; 68, 87; 69, 88; 70, 89; 70, 90; 71, 91; 72, 92; 72, 93; 73, 94; 74, 95; 74, 96; 75, 97; 76, 98; 76, 99; 76, 100; 77, 101; 82, 102; 82, 103; 83, 104; 86, 105; 86, 106; 88, 107; 88, 108; 89, 109; 89, 110; 92, 111; 92, 112; 94, 113; 94, 114; 97, 115; 97, 116; 98, 117; 98, 118; 101, 119; 101, 120; 107, 121; 107, 122; 108, 123; 113, 124; 113, 125; 114, 126; 115, 127; 115, 128; 116, 129; 116, 130; 117, 131; 117, 132; 117, 133; 118, 134; 123, 135; 123, 136; 126, 137; 126, 138; 129, 139; 129, 140; 130, 141; 130, 142; 131, 143; 131, 144; 136, 145; 138, 146; 140, 147; 142, 148; 144, 149; 144, 150; 149, 151; 149, 152; 150, 153; 150, 154; 154, 155; 155, 156; 155, 157 | def check_valid_time_and_sort(df, timescol, days=5, warning=True):
"""Check if the data contains reads created within the same `days` timeframe.
if not, print warning and only return part of the data which is within `days` days
Resetting the index twice to get also an "index" column for plotting the cum_yield_reads plot
"""
timediff = (df[timescol].max() - df[timescol].min()).days
if timediff < days:
return df.sort_values(timescol).reset_index(drop=True).reset_index()
else:
if warning:
sys.stderr.write(
"\nWarning: data generated is from more than {} days.\n".format(str(days)))
sys.stderr.write("Likely this indicates you are combining multiple runs.\n")
sys.stderr.write(
"Plots based on time are invalid and therefore truncated to first {} days.\n\n"
.format(str(days)))
logging.warning("Time plots truncated to first {} days: invalid timespan: {} days"
.format(str(days), str(timediff)))
return df[df[timescol] < timedelta(days=days)] \
.sort_values(timescol) \
.reset_index(drop=True) \
.reset_index() |
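A usage sketch with invented data (assumes check_valid_time_and_sort and the module-level imports it relies on, i.e. pandas, datetime.timedelta, sys and logging, are available): reads spanning more than five days are truncated to the first five.

import pandas as pd

df = pd.DataFrame({"start_time": pd.to_timedelta(["0 days", "2 days", "3 days", "10 days"])})
trimmed = check_valid_time_and_sort(df, "start_time", days=5, warning=False)
print(len(trimmed))           # 3 -- the 10-day read is dropped
print(list(trimmed.columns))  # ['index', 'start_time'] after the double reset_index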
0, module; 1, function_definition; 2, function_name:list_anime_series; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, return_statement; 12, identifier:sort; 13, attribute; 14, identifier:limit; 15, attribute; 16, identifier:offset; 17, integer:0; 18, comment:"""Get a list of anime series
@param str sort pick how results should be sorted, should be one
of META.SORT_*
@param int limit limit number of series to return, there doesn't
seem to be an upper bound
@param int offset list series starting from this offset, for pagination
@return list<crunchyroll.models.Series>
"""; 19, assignment; 20, identifier:result; 21, identifier:META; 22, identifier:SORT_ALPHA; 23, identifier:META; 24, identifier:MAX_SERIES; 25, identifier:result; 26, call; 27, attribute; 28, argument_list; 29, attribute; 30, identifier:list_series; 31, keyword_argument; 32, keyword_argument; 33, keyword_argument; 34, keyword_argument; 35, identifier:self; 36, identifier:_android_api; 37, identifier:media_type; 38, attribute; 39, identifier:filter; 40, identifier:sort; 41, identifier:limit; 42, identifier:limit; 43, identifier:offset; 44, identifier:offset; 45, identifier:ANDROID; 46, identifier:MEDIA_TYPE_ANIME | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 6, 12; 6, 13; 7, 14; 7, 15; 8, 16; 8, 17; 9, 18; 10, 19; 11, 20; 13, 21; 13, 22; 15, 23; 15, 24; 19, 25; 19, 26; 26, 27; 26, 28; 27, 29; 27, 30; 28, 31; 28, 32; 28, 33; 28, 34; 29, 35; 29, 36; 31, 37; 31, 38; 32, 39; 32, 40; 33, 41; 33, 42; 34, 43; 34, 44; 38, 45; 38, 46 | def list_anime_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0):
"""Get a list of anime series
@param str sort pick how results should be sorted, should be one
of META.SORT_*
@param int limit limit number of series to return, there doesn't
seem to be an upper bound
@param int offset list series starting from this offset, for pagination
@return list<crunchyroll.models.Series>
"""
result = self._android_api.list_series(
media_type=ANDROID.MEDIA_TYPE_ANIME,
filter=sort,
limit=limit,
offset=offset)
return result |
0, module; 1, function_definition; 2, function_name:list_drama_series; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, return_statement; 12, identifier:sort; 13, attribute; 14, identifier:limit; 15, attribute; 16, identifier:offset; 17, integer:0; 18, comment:"""Get a list of drama series
@param str sort pick how results should be sorted, should be one
of META.SORT_*
@param int limit limit number of series to return, there doesn't
seem to be an upper bound
@param int offset list series starting from this offset, for pagination
@return list<crunchyroll.models.Series>
"""; 19, assignment; 20, identifier:result; 21, identifier:META; 22, identifier:SORT_ALPHA; 23, identifier:META; 24, identifier:MAX_SERIES; 25, identifier:result; 26, call; 27, attribute; 28, argument_list; 29, attribute; 30, identifier:list_series; 31, keyword_argument; 32, keyword_argument; 33, keyword_argument; 34, keyword_argument; 35, identifier:self; 36, identifier:_android_api; 37, identifier:media_type; 38, attribute; 39, identifier:filter; 40, identifier:sort; 41, identifier:limit; 42, identifier:limit; 43, identifier:offset; 44, identifier:offset; 45, identifier:ANDROID; 46, identifier:MEDIA_TYPE_DRAMA | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 6, 12; 6, 13; 7, 14; 7, 15; 8, 16; 8, 17; 9, 18; 10, 19; 11, 20; 13, 21; 13, 22; 15, 23; 15, 24; 19, 25; 19, 26; 26, 27; 26, 28; 27, 29; 27, 30; 28, 31; 28, 32; 28, 33; 28, 34; 29, 35; 29, 36; 31, 37; 31, 38; 32, 39; 32, 40; 33, 41; 33, 42; 34, 43; 34, 44; 38, 45; 38, 46 | def list_drama_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0):
"""Get a list of drama series
@param str sort pick how results should be sorted, should be one
of META.SORT_*
@param int limit limit number of series to return, there doesn't
seem to be an upper bound
@param int offset list series starting from this offset, for pagination
@return list<crunchyroll.models.Series>
"""
result = self._android_api.list_series(
media_type=ANDROID.MEDIA_TYPE_DRAMA,
filter=sort,
limit=limit,
offset=offset)
return result |
0, module; 1, function_definition; 2, function_name:list_media; 3, parameters; 4, block; 5, identifier:self; 6, identifier:series; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, return_statement; 15, identifier:sort; 16, attribute; 17, identifier:limit; 18, attribute; 19, identifier:offset; 20, integer:0; 21, comment:"""List media for a given series or collection
@param crunchyroll.models.Series series the series to search for
@param str sort choose the ordering of the
results, only META.SORT_DESC
is known to work
@param int limit limit size of results
@param int offset start results from this index,
for pagination
@return list<crunchyroll.models.Media>
"""; 22, assignment; 23, call; 24, assignment; 25, identifier:result; 26, identifier:META; 27, identifier:SORT_DESC; 28, identifier:META; 29, identifier:MAX_MEDIA; 30, identifier:params; 31, dictionary; 32, attribute; 33, argument_list; 34, identifier:result; 35, call; 36, pair; 37, pair; 38, pair; 39, identifier:params; 40, identifier:update; 41, call; 42, attribute; 43, argument_list; 44, string; 45, identifier:sort; 46, string; 47, identifier:offset; 48, string; 49, identifier:limit; 50, attribute; 51, argument_list; 52, attribute; 53, identifier:list_media; 54, dictionary_splat; 55, string_content:sort; 56, string_content:offset; 57, string_content:limit; 58, identifier:self; 59, identifier:_get_series_query_dict; 60, identifier:series; 61, identifier:self; 62, identifier:_android_api; 63, identifier:params | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 7, 15; 7, 16; 8, 17; 8, 18; 9, 19; 9, 20; 10, 21; 11, 22; 12, 23; 13, 24; 14, 25; 16, 26; 16, 27; 18, 28; 18, 29; 22, 30; 22, 31; 23, 32; 23, 33; 24, 34; 24, 35; 31, 36; 31, 37; 31, 38; 32, 39; 32, 40; 33, 41; 35, 42; 35, 43; 36, 44; 36, 45; 37, 46; 37, 47; 38, 48; 38, 49; 41, 50; 41, 51; 42, 52; 42, 53; 43, 54; 44, 55; 46, 56; 48, 57; 50, 58; 50, 59; 51, 60; 52, 61; 52, 62; 54, 63 | def list_media(self, series, sort=META.SORT_DESC, limit=META.MAX_MEDIA, offset=0):
"""List media for a given series or collection
@param crunchyroll.models.Series series the series to search for
@param str sort choose the ordering of the
results, only META.SORT_DESC
is known to work
@param int limit limit size of results
@param int offset start results from this index,
for pagination
@return list<crunchyroll.models.Media>
"""
params = {
'sort': sort,
'offset': offset,
'limit': limit,
}
params.update(self._get_series_query_dict(series))
result = self._android_api.list_media(**params)
return result |
0, module; 1, function_definition; 2, function_name:solve_sort; 3, parameters; 4, block; 5, identifier:expr; 6, identifier:vars; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, function_definition; 11, expression_statement; 12, return_statement; 13, comment:"""Sort values on the LHS by the value they yield when passed to RHS."""; 14, assignment; 15, assignment; 16, function_name:_key_func; 17, parameters; 18, block; 19, assignment; 20, call; 21, identifier:lhs_values; 22, call; 23, identifier:sort_expression; 24, attribute; 25, identifier:x; 26, return_statement; 27, identifier:results; 28, call; 29, identifier:Result; 30, argument_list; 31, attribute; 32, argument_list; 33, identifier:expr; 34, identifier:rhs; 35, attribute; 36, attribute; 37, argument_list; 38, call; 39, tuple; 40, identifier:repeated; 41, identifier:getvalues; 42, subscript; 43, call; 44, identifier:value; 45, identifier:ordered; 46, identifier:ordered; 47, identifier:lhs_values; 48, keyword_argument; 49, attribute; 50, argument_list; 51, call; 52, integer:0; 53, identifier:solve; 54, argument_list; 55, identifier:key_func; 56, identifier:_key_func; 57, identifier:repeated; 58, identifier:meld; 59, list_splat; 60, identifier:__solve_for_repeated; 61, argument_list; 62, identifier:sort_expression; 63, call; 64, identifier:results; 65, attribute; 66, identifier:vars; 67, identifier:__nest_scope; 68, argument_list; 69, identifier:expr; 70, identifier:lhs; 71, attribute; 72, identifier:vars; 73, identifier:x; 74, identifier:expr; 75, identifier:lhs | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 8, 14; 9, 15; 10, 16; 10, 17; 10, 18; 11, 19; 12, 20; 14, 21; 14, 22; 15, 23; 15, 24; 17, 25; 18, 26; 19, 27; 19, 28; 20, 29; 20, 30; 22, 31; 22, 32; 24, 33; 24, 34; 26, 35; 28, 36; 28, 37; 30, 38; 30, 39; 31, 40; 31, 41; 32, 42; 35, 43; 35, 44; 36, 45; 36, 46; 37, 47; 37, 48; 38, 49; 38, 50; 42, 51; 42, 52; 43, 53; 43, 54; 48, 55; 48, 56; 49, 57; 49, 58; 50, 59; 51, 60; 51, 61; 54, 62; 54, 63; 59, 64; 61, 65; 61, 66; 63, 67; 63, 68; 65, 69; 65, 70; 68, 71; 68, 72; 68, 73; 71, 74; 71, 75 | def solve_sort(expr, vars):
"""Sort values on the LHS by the value they yield when passed to RHS."""
lhs_values = repeated.getvalues(__solve_for_repeated(expr.lhs, vars)[0])
sort_expression = expr.rhs
def _key_func(x):
return solve(sort_expression, __nest_scope(expr.lhs, vars, x)).value
results = ordered.ordered(lhs_values, key_func=_key_func)
return Result(repeated.meld(*results), ()) |
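Restated in plain Python rather than the EFILTER AST (the row data and key_expr below are invented stand-ins): the left-hand values are ordered by whatever key the right-hand expression yields for each of them.

rows = [{"name": "b", "size": 3}, {"name": "a", "size": 1}, {"name": "c", "size": 2}]
key_expr = lambda row: row["size"]  # stands in for solve(sort_expression, ...) per value
print(sorted(rows, key=key_expr))   # ordered by size: a (1), c (2), b (3)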
0, module; 1, function_definition; 2, function_name:expression; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, return_statement; 10, identifier:previous_precedence; 11, integer:0; 12, comment:"""An expression is an atom or an infix expression.
Grammar (sort of, actually a precedence-climbing parser):
expression = atom [ binary_operator expression ] .
Args:
previous_precedence: What operator precedence should we start with?
"""; 13, assignment; 14, call; 15, identifier:lhs; 16, call; 17, attribute; 18, argument_list; 19, attribute; 20, argument_list; 21, identifier:self; 22, identifier:operator; 23, identifier:lhs; 24, identifier:previous_precedence; 25, identifier:self; 26, identifier:atom | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 6, 10; 6, 11; 7, 12; 8, 13; 9, 14; 13, 15; 13, 16; 14, 17; 14, 18; 16, 19; 16, 20; 17, 21; 17, 22; 18, 23; 18, 24; 19, 25; 19, 26 | def expression(self, previous_precedence=0):
"""An expression is an atom or an infix expression.
Grammar (sort of, actually a precedence-climbing parser):
expression = atom [ binary_operator expression ] .
Args:
previous_precedence: What operator precedence should we start with?
"""
lhs = self.atom()
return self.operator(lhs, previous_precedence) |
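For readers unfamiliar with precedence climbing, here is a minimal self-contained sketch of the same strategy over a toy token list (this is not the DottySQL grammar; the operators and the tuple AST are invented for illustration):

# Each operator maps to (precedence, associativity).
OPS = {"+": (1, "left"), "-": (1, "left"), "*": (2, "left"), "**": (3, "right")}

def parse(tokens):
    pos = [0]

    def peek():
        return tokens[pos[0]] if pos[0] < len(tokens) else None

    def advance():
        tok = tokens[pos[0]]
        pos[0] += 1
        return tok

    def expression(min_prec=0):
        lhs = advance()  # an atom is just an int in this toy grammar
        while peek() in OPS and OPS[peek()][0] >= min_prec:
            op = advance()
            prec, assoc = OPS[op]
            rhs = expression(prec + 1 if assoc == "left" else prec)
            lhs = (op, lhs, rhs)
        return lhs

    return expression()

print(parse([1, "+", 2, "*", 3]))    # ('+', 1, ('*', 2, 3))
print(parse([2, "**", 3, "**", 2]))  # ('**', 2, ('**', 3, 2)), i.e. right-associative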
0, module; 1, function_definition; 2, function_name:application; 3, parameters; 4, block; 5, identifier:self; 6, identifier:func; 7, expression_statement; 8, expression_statement; 9, if_statement; 10, expression_statement; 11, while_statement; 12, expression_statement; 13, return_statement; 14, comment:"""Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
"""; 15, assignment; 16, call; 17, comment:# That was easy.; 18, block; 19, assignment; 20, call; 21, block; 22, call; 23, call; 24, identifier:start; 25, attribute; 26, attribute; 27, argument_list; 28, return_statement; 29, identifier:arguments; 30, list; 31, attribute; 32, argument_list; 33, expression_statement; 34, attribute; 35, argument_list; 36, attribute; 37, argument_list; 38, attribute; 39, identifier:start; 40, attribute; 41, identifier:accept; 42, attribute; 43, call; 44, call; 45, attribute; 46, identifier:accept; 47, attribute; 48, call; 49, attribute; 50, identifier:expect; 51, attribute; 52, identifier:ast; 53, identifier:Apply; 54, identifier:func; 55, list_splat; 56, keyword_argument; 57, keyword_argument; 58, keyword_argument; 59, attribute; 60, identifier:matched; 61, identifier:self; 62, identifier:tokens; 63, identifier:common_grammar; 64, identifier:rparen; 65, attribute; 66, argument_list; 67, attribute; 68, argument_list; 69, identifier:self; 70, identifier:tokens; 71, identifier:common_grammar; 72, identifier:comma; 73, attribute; 74, argument_list; 75, identifier:self; 76, identifier:tokens; 77, identifier:common_grammar; 78, identifier:rparen; 79, identifier:arguments; 80, identifier:start; 81, identifier:start; 82, identifier:end; 83, attribute; 84, identifier:source; 85, attribute; 86, identifier:self; 87, identifier:tokens; 88, identifier:ast; 89, identifier:Apply; 90, identifier:func; 91, keyword_argument; 92, keyword_argument; 93, keyword_argument; 94, identifier:self; 95, identifier:expression; 96, identifier:arguments; 97, identifier:append; 98, call; 99, attribute; 100, identifier:end; 101, identifier:self; 102, identifier:original; 103, identifier:start; 104, identifier:start; 105, identifier:end; 106, attribute; 107, identifier:source; 108, attribute; 109, attribute; 110, argument_list; 111, attribute; 112, identifier:matched; 113, attribute; 114, identifier:end; 115, identifier:self; 116, identifier:original; 117, identifier:self; 118, identifier:expression; 119, identifier:self; 120, identifier:tokens; 121, attribute; 122, identifier:matched; 123, identifier:self; 124, identifier:tokens | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 7, 14; 8, 15; 9, 16; 9, 17; 9, 18; 10, 19; 11, 20; 11, 21; 12, 22; 13, 23; 15, 24; 15, 25; 16, 26; 16, 27; 18, 28; 19, 29; 19, 30; 20, 31; 20, 32; 21, 33; 22, 34; 22, 35; 23, 36; 23, 37; 25, 38; 25, 39; 26, 40; 26, 41; 27, 42; 28, 43; 30, 44; 31, 45; 31, 46; 32, 47; 33, 48; 34, 49; 34, 50; 35, 51; 36, 52; 36, 53; 37, 54; 37, 55; 37, 56; 37, 57; 37, 58; 38, 59; 38, 60; 40, 61; 40, 62; 42, 63; 42, 64; 43, 65; 43, 66; 44, 67; 44, 68; 45, 69; 45, 70; 47, 71; 47, 72; 48, 73; 48, 74; 49, 75; 49, 76; 51, 77; 51, 78; 55, 79; 56, 80; 56, 81; 57, 82; 57, 83; 58, 84; 58, 85; 59, 86; 59, 87; 65, 88; 65, 89; 66, 90; 66, 91; 66, 92; 66, 93; 67, 94; 67, 95; 73, 96; 73, 97; 74, 98; 83, 99; 83, 100; 85, 101; 85, 102; 91, 103; 91, 104; 92, 105; 92, 106; 93, 107; 93, 108; 98, 109; 98, 110; 99, 111; 99, 112; 106, 113; 106, 114; 108, 115; 108, 116; 109, 117; 109, 118; 111, 119; 111, 120; 113, 121; 113, 122; 121, 123; 121, 124 | def application(self, func):
"""Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
"""
start = self.tokens.matched.start
if self.tokens.accept(common_grammar.rparen):
# That was easy.
return ast.Apply(func, start=start, end=self.tokens.matched.end,
source=self.original)
arguments = [self.expression()]
while self.tokens.accept(common_grammar.comma):
arguments.append(self.expression())
self.tokens.expect(common_grammar.rparen)
return ast.Apply(func, *arguments, start=start,
end=self.tokens.matched.end, source=self.original) |
0, module; 1, function_definition; 2, function_name:value_eq; 3, parameters; 4, block; 5, identifier:self; 6, identifier:other; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, comment:"""Sorted comparison of values."""; 12, assignment; 13, assignment; 14, comparison_operator:self_sorted == other_sorted; 15, identifier:self_sorted; 16, call; 17, identifier:other_sorted; 18, call; 19, identifier:self_sorted; 20, identifier:other_sorted; 21, attribute; 22, argument_list; 23, attribute; 24, argument_list; 25, identifier:ordered; 26, identifier:ordered; 27, call; 28, identifier:ordered; 29, identifier:ordered; 30, call; 31, attribute; 32, argument_list; 33, attribute; 34, argument_list; 35, identifier:self; 36, identifier:getvalues; 37, identifier:repeated; 38, identifier:getvalues; 39, identifier:other | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 10, 14; 12, 15; 12, 16; 13, 17; 13, 18; 14, 19; 14, 20; 16, 21; 16, 22; 18, 23; 18, 24; 21, 25; 21, 26; 22, 27; 23, 28; 23, 29; 24, 30; 27, 31; 27, 32; 30, 33; 30, 34; 31, 35; 31, 36; 33, 37; 33, 38; 34, 39 | def value_eq(self, other):
"""Sorted comparison of values."""
self_sorted = ordered.ordered(self.getvalues())
other_sorted = ordered.ordered(repeated.getvalues(other))
return self_sorted == other_sorted |
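The same order-insensitive comparison, stripped of the repeated/ordered wrappers (plain lists, for illustration only):

print(sorted([3, 1, 2]) == sorted([2, 3, 1]))  # True  -- same values, any order
print(sorted([3, 1]) == sorted([1, 2]))        # False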
0, module; 1, function_definition; 2, function_name:merge; 3, parameters; 4, block; 5, identifier:a_intervals; 6, identifier:b_intervals; 7, identifier:op; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, expression_statement; 18, while_statement; 19, return_statement; 20, comment:"""
Merge two lists of intervals according to the boolean function op
``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals).
This operation keeps the resulting interval set consistent.
Parameters
----------
a_intervals : `~numpy.ndarray`
A sorted merged list of intervals represented as a N x 2 numpy array
b_intervals : `~numpy.ndarray`
A sorted merged list of intervals represented as a N x 2 numpy array
op : `function`
Lambda function taking two params and returning the result of the operation between
these two params.
Example: lambda in_a, in_b: in_a and in_b describes the intersection of ``a_intervals`` and
``b_intervals`` whereas lambda in_a, in_b: in_a or in_b describes the union of ``a_intervals`` and
``b_intervals``.
Returns
-------
array : `numpy.ndarray`
a N x 2 numpy containing intervals resulting from the op between ``a_intervals`` and ``b_intervals``.
"""; 21, assignment; 22, assignment; 23, assignment; 24, augmented_assignment; 25, augmented_assignment; 26, assignment; 27, assignment; 28, assignment; 29, assignment; 30, comparison_operator:scan < sentinel; 31, block; 32, call; 33, identifier:a_endpoints; 34, call; 35, identifier:b_endpoints; 36, call; 37, identifier:sentinel; 38, binary_operator:max(a_endpoints[-1], b_endpoints[-1]) + 1; 39, identifier:a_endpoints; 40, list; 41, identifier:b_endpoints; 42, list; 43, identifier:a_index; 44, integer:0; 45, identifier:b_index; 46, integer:0; 47, identifier:res; 48, list; 49, identifier:scan; 50, call; 51, identifier:scan; 52, identifier:sentinel; 53, expression_statement; 54, expression_statement; 55, expression_statement; 56, if_statement; 57, if_statement; 58, if_statement; 59, expression_statement; 60, attribute; 61, argument_list; 62, attribute; 63, argument_list; 64, attribute; 65, argument_list; 66, call; 67, integer:1; 68, identifier:sentinel; 69, identifier:sentinel; 70, identifier:min; 71, argument_list; 72, assignment; 73, assignment; 74, assignment; 75, binary_operator:in_res ^ (len(res) % 2); 76, block; 77, comparison_operator:scan == a_endpoints[a_index]; 78, block; 79, comparison_operator:scan == b_endpoints[b_index]; 80, block; 81, assignment; 82, call; 83, identifier:reshape; 84, tuple; 85, call; 86, identifier:tolist; 87, call; 88, identifier:tolist; 89, identifier:max; 90, argument_list; 91, subscript; 92, subscript; 93, identifier:in_a; 94, not_operator; 95, identifier:in_b; 96, not_operator; 97, identifier:in_res; 98, call; 99, identifier:in_res; 100, parenthesized_expression; 101, expression_statement; 102, identifier:scan; 103, subscript; 104, expression_statement; 105, identifier:scan; 106, subscript; 107, expression_statement; 108, identifier:scan; 109, call; 110, attribute; 111, argument_list; 112, unary_operator; 113, integer:2; 114, attribute; 115, argument_list; 116, attribute; 117, argument_list; 118, subscript; 119, subscript; 120, identifier:a_endpoints; 121, integer:0; 122, identifier:b_endpoints; 123, integer:0; 124, parenthesized_expression; 125, parenthesized_expression; 126, identifier:op; 127, argument_list; 128, binary_operator:len(res) % 2; 129, augmented_assignment; 130, identifier:a_endpoints; 131, identifier:a_index; 132, augmented_assignment; 133, identifier:b_endpoints; 134, identifier:b_index; 135, augmented_assignment; 136, identifier:min; 137, argument_list; 138, identifier:np; 139, identifier:asarray; 140, identifier:res; 141, integer:1; 142, identifier:a_intervals; 143, identifier:flatten; 144, identifier:b_intervals; 145, identifier:flatten; 146, identifier:a_endpoints; 147, unary_operator; 148, identifier:b_endpoints; 149, unary_operator; 150, binary_operator:(scan < a_endpoints[a_index]) ^ (a_index % 2); 151, binary_operator:(scan < b_endpoints[b_index]) ^ (b_index % 2); 152, identifier:in_a; 153, identifier:in_b; 154, call; 155, integer:2; 156, identifier:res; 157, list; 158, identifier:a_index; 159, integer:1; 160, identifier:b_index; 161, integer:1; 162, subscript; 163, subscript; 164, integer:1; 165, integer:1; 166, parenthesized_expression; 167, parenthesized_expression; 168, parenthesized_expression; 169, parenthesized_expression; 170, identifier:len; 171, argument_list; 172, identifier:scan; 173, identifier:a_endpoints; 174, identifier:a_index; 175, identifier:b_endpoints; 176, identifier:b_index; 177, comparison_operator:scan < a_endpoints[a_index]; 178, binary_operator:a_index % 2; 179, comparison_operator:scan < 
b_endpoints[b_index]; 180, binary_operator:b_index % 2; 181, identifier:res; 182, identifier:scan; 183, subscript; 184, identifier:a_index; 185, integer:2; 186, identifier:scan; 187, subscript; 188, identifier:b_index; 189, integer:2; 190, identifier:a_endpoints; 191, identifier:a_index; 192, identifier:b_endpoints; 193, identifier:b_index | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 8, 20; 9, 21; 10, 22; 11, 23; 12, 24; 13, 25; 14, 26; 15, 27; 16, 28; 17, 29; 18, 30; 18, 31; 19, 32; 21, 33; 21, 34; 22, 35; 22, 36; 23, 37; 23, 38; 24, 39; 24, 40; 25, 41; 25, 42; 26, 43; 26, 44; 27, 45; 27, 46; 28, 47; 28, 48; 29, 49; 29, 50; 30, 51; 30, 52; 31, 53; 31, 54; 31, 55; 31, 56; 31, 57; 31, 58; 31, 59; 32, 60; 32, 61; 34, 62; 34, 63; 36, 64; 36, 65; 38, 66; 38, 67; 40, 68; 42, 69; 50, 70; 50, 71; 53, 72; 54, 73; 55, 74; 56, 75; 56, 76; 57, 77; 57, 78; 58, 79; 58, 80; 59, 81; 60, 82; 60, 83; 61, 84; 62, 85; 62, 86; 64, 87; 64, 88; 66, 89; 66, 90; 71, 91; 71, 92; 72, 93; 72, 94; 73, 95; 73, 96; 74, 97; 74, 98; 75, 99; 75, 100; 76, 101; 77, 102; 77, 103; 78, 104; 79, 105; 79, 106; 80, 107; 81, 108; 81, 109; 82, 110; 82, 111; 84, 112; 84, 113; 85, 114; 85, 115; 87, 116; 87, 117; 90, 118; 90, 119; 91, 120; 91, 121; 92, 122; 92, 123; 94, 124; 96, 125; 98, 126; 98, 127; 100, 128; 101, 129; 103, 130; 103, 131; 104, 132; 106, 133; 106, 134; 107, 135; 109, 136; 109, 137; 110, 138; 110, 139; 111, 140; 112, 141; 114, 142; 114, 143; 116, 144; 116, 145; 118, 146; 118, 147; 119, 148; 119, 149; 124, 150; 125, 151; 127, 152; 127, 153; 128, 154; 128, 155; 129, 156; 129, 157; 132, 158; 132, 159; 135, 160; 135, 161; 137, 162; 137, 163; 147, 164; 149, 165; 150, 166; 150, 167; 151, 168; 151, 169; 154, 170; 154, 171; 157, 172; 162, 173; 162, 174; 163, 175; 163, 176; 166, 177; 167, 178; 168, 179; 169, 180; 171, 181; 177, 182; 177, 183; 178, 184; 178, 185; 179, 186; 179, 187; 180, 188; 180, 189; 183, 190; 183, 191; 187, 192; 187, 193 | def merge(a_intervals, b_intervals, op):
"""
Merge two lists of intervals according to the boolean function op
``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals).
This operation keeps the resulting interval set consistent.
Parameters
----------
a_intervals : `~numpy.ndarray`
A sorted merged list of intervals represented as a N x 2 numpy array
b_intervals : `~numpy.ndarray`
A sorted merged list of intervals represented as a N x 2 numpy array
op : `function`
Lambda function taking two params and returning the result of the operation between
these two params.
Example: lambda in_a, in_b: in_a and in_b describes the intersection of ``a_intervals`` and
``b_intervals`` whereas lambda in_a, in_b: in_a or in_b describes the union of ``a_intervals`` and
``b_intervals``.
Returns
-------
array : `numpy.ndarray`
a N x 2 numpy containing intervals resulting from the op between ``a_intervals`` and ``b_intervals``.
"""
a_endpoints = a_intervals.flatten().tolist()
b_endpoints = b_intervals.flatten().tolist()
sentinel = max(a_endpoints[-1], b_endpoints[-1]) + 1
a_endpoints += [sentinel]
b_endpoints += [sentinel]
a_index = 0
b_index = 0
res = []
scan = min(a_endpoints[0], b_endpoints[0])
while scan < sentinel:
in_a = not ((scan < a_endpoints[a_index]) ^ (a_index % 2))
in_b = not ((scan < b_endpoints[b_index]) ^ (b_index % 2))
in_res = op(in_a, in_b)
if in_res ^ (len(res) % 2):
res += [scan]
if scan == a_endpoints[a_index]:
a_index += 1
if scan == b_endpoints[b_index]:
b_index += 1
scan = min(a_endpoints[a_index], b_endpoints[b_index])
return np.asarray(res).reshape((-1, 2)) |
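A usage sketch for merge() with invented intervals (assumes numpy is imported as np, as the function body requires): the op lambda decides whether each scanned point belongs to the result, so or yields the union and and yields the intersection.

import numpy as np

a = np.array([[0, 4], [6, 9]])
b = np.array([[2, 7]])

print(merge(a, b, lambda in_a, in_b: in_a or in_b))   # union        -> [[0 9]]
print(merge(a, b, lambda in_a, in_b: in_a and in_b))  # intersection -> [[2 4]
                                                      #                  [6 7]]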
0, module; 1, function_definition; 2, function_name:check_bam; 3, parameters; 4, block; 5, identifier:bam; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, if_statement; 11, if_statement; 12, if_statement; 13, return_statement; 14, identifier:samtype; 15, string:"bam"; 16, comment:"""Check if bam file is valid.
Bam file should:
- exist
- have an index (create if necessary)
- be sorted by coordinate
- have at least one mapped read
"""; 17, call; 18, assignment; 19, not_operator; 20, block; 21, not_operator; 22, block; 23, comparison_operator:samtype == "bam"; 24, block; 25, identifier:samfile; 26, attribute; 27, argument_list; 28, identifier:samfile; 29, call; 30, call; 31, expression_statement; 32, expression_statement; 33, comment:# Need to reload the samfile after creating index; 34, expression_statement; 35, comparison_operator:samfile.header['HD']['SO'] == 'coordinate'; 36, expression_statement; 37, expression_statement; 38, identifier:samtype; 39, string:"bam"; 40, expression_statement; 41, if_statement; 42, identifier:ut; 43, identifier:check_existance; 44, identifier:bam; 45, attribute; 46, argument_list; 47, attribute; 48, argument_list; 49, call; 50, assignment; 51, call; 52, subscript; 53, string; 54, call; 55, call; 56, call; 57, comparison_operator:samfile.mapped == 0; 58, block; 59, identifier:pysam; 60, identifier:AlignmentFile; 61, identifier:bam; 62, string:"rb"; 63, identifier:samfile; 64, identifier:has_index; 65, attribute; 66, argument_list; 67, identifier:samfile; 68, call; 69, attribute; 70, argument_list; 71, subscript; 72, string; 73, string_content:coordinate; 74, attribute; 75, argument_list; 76, attribute; 77, argument_list; 78, attribute; 79, argument_list; 80, attribute; 81, integer:0; 82, expression_statement; 83, expression_statement; 84, identifier:pysam; 85, identifier:index; 86, identifier:bam; 87, attribute; 88, argument_list; 89, identifier:logging; 90, identifier:info; 91, string:"Nanoget: No index for bam file could be found, created index."; 92, attribute; 93, string; 94, string_content:SO; 95, identifier:logging; 96, identifier:error; 97, call; 98, identifier:sys; 99, identifier:exit; 100, string:"Please use a bam file sorted by coordinate."; 101, identifier:logging; 102, identifier:info; 103, call; 104, identifier:samfile; 105, identifier:mapped; 106, call; 107, call; 108, identifier:pysam; 109, identifier:AlignmentFile; 110, identifier:bam; 111, string:"rb"; 112, identifier:samfile; 113, identifier:header; 114, string_content:HD; 115, attribute; 116, argument_list; 117, attribute; 118, argument_list; 119, attribute; 120, argument_list; 121, attribute; 122, argument_list; 123, string:"Nanoget: Bam file {} not sorted by coordinate!."; 124, identifier:format; 125, identifier:bam; 126, string:"Nanoget: Bam file {} contains {} mapped and {} unmapped reads."; 127, identifier:format; 128, identifier:bam; 129, attribute; 130, attribute; 131, identifier:logging; 132, identifier:error; 133, call; 134, identifier:sys; 135, identifier:exit; 136, call; 137, identifier:samfile; 138, identifier:mapped; 139, identifier:samfile; 140, identifier:unmapped; 141, attribute; 142, argument_list; 143, attribute; 144, argument_list; 145, string:"Nanoget: Bam file {} does not contain aligned reads."; 146, identifier:format; 147, identifier:bam; 148, string:"FATAL: not a single read was mapped in bam file {}"; 149, identifier:format; 150, identifier:bam | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 6, 14; 6, 15; 7, 16; 8, 17; 9, 18; 10, 19; 10, 20; 11, 21; 11, 22; 12, 23; 12, 24; 13, 25; 17, 26; 17, 27; 18, 28; 18, 29; 19, 30; 20, 31; 20, 32; 20, 33; 20, 34; 21, 35; 22, 36; 22, 37; 23, 38; 23, 39; 24, 40; 24, 41; 26, 42; 26, 43; 27, 44; 29, 45; 29, 46; 30, 47; 30, 48; 31, 49; 32, 50; 34, 51; 35, 52; 35, 53; 36, 54; 37, 55; 40, 56; 41, 57; 41, 58; 45, 59; 45, 60; 46, 61; 46, 62; 47, 63; 47, 64; 49, 65; 49, 66; 50, 67; 50, 68; 51, 69; 51, 70; 52, 71; 52, 72; 53, 
73; 54, 74; 54, 75; 55, 76; 55, 77; 56, 78; 56, 79; 57, 80; 57, 81; 58, 82; 58, 83; 65, 84; 65, 85; 66, 86; 68, 87; 68, 88; 69, 89; 69, 90; 70, 91; 71, 92; 71, 93; 72, 94; 74, 95; 74, 96; 75, 97; 76, 98; 76, 99; 77, 100; 78, 101; 78, 102; 79, 103; 80, 104; 80, 105; 82, 106; 83, 107; 87, 108; 87, 109; 88, 110; 88, 111; 92, 112; 92, 113; 93, 114; 97, 115; 97, 116; 103, 117; 103, 118; 106, 119; 106, 120; 107, 121; 107, 122; 115, 123; 115, 124; 116, 125; 117, 126; 117, 127; 118, 128; 118, 129; 118, 130; 119, 131; 119, 132; 120, 133; 121, 134; 121, 135; 122, 136; 129, 137; 129, 138; 130, 139; 130, 140; 133, 141; 133, 142; 136, 143; 136, 144; 141, 145; 141, 146; 142, 147; 143, 148; 143, 149; 144, 150 | def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exist
- have an index (created if necessary)
- be sorted by coordinate
- have at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile |
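A minimal usage sketch for check_bam; the file name is a placeholder and the call assumes a coordinate-sorted BAM with at least one mapped read.

# Hypothetical path; check_bam returns the opened pysam.AlignmentFile and
# creates a .bai index first if one is missing.
samfile = check_bam("alignments.bam")
print(samfile.mapped, samfile.unmapped)
samfile.close()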
0, module; 1, function_definition; 2, function_name:get_input; 3, parameters; 4, block; 5, identifier:source; 6, identifier:files; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, with_statement; 17, if_statement; 18, expression_statement; 19, expression_statement; 20, if_statement; 21, identifier:threads; 22, integer:4; 23, identifier:readtype; 24, string:"1D"; 25, identifier:combine; 26, string:"simple"; 27, identifier:names; 28, None; 29, identifier:barcoded; 30, False; 31, comment:"""Get input and process accordingly.
Data can be:
- a uncompressed, bgzip, bzip2 or gzip compressed fastq file
- a uncompressed, bgzip, bzip2 or gzip compressed fasta file
- a rich fastq containing additional key=value information in the description,
as produced by MinKNOW and albacore with the same compression options as above
- a sorted bam file
- a sorted cram file
- a (compressed) sequencing_summary.txt file generated by albacore
Handle is passed to the proper functions to get DataFrame with metrics
Multiple files of the same type can be used to extract info from, which is done in parallel
Arguments:
- source: defines the input data type and the function that needs to be called
- files: is a list of one or more files to operate on, from the type of <source>
- threads: is the amount of workers which can be used
- readtype: (only relevant for summary input) and specifies which columns have to be extracted
- combine: is either 'simple' or 'track', with the difference that with 'track' an additional
field is created with the name of the dataset
- names: if combine="track", the names to be used for the datasets. Needs to have same length as
files, or None
"""; 32, assignment; 33, assignment; 34, assignment; 35, with_clause; 36, block; 37, boolean_operator; 38, block; 39, assignment; 40, call; 41, comparison_operator:len(datadf) == 0; 42, block; 43, else_clause; 44, identifier:proc_functions; 45, dictionary; 46, identifier:filethreads; 47, call; 48, identifier:threadsleft; 49, binary_operator:threads - filethreads; 50, with_item; 51, expression_statement; 52, expression_statement; 53, comparison_operator:"readIDs" in datadf; 54, call; 55, expression_statement; 56, identifier:datadf; 57, call; 58, attribute; 59, argument_list; 60, call; 61, integer:0; 62, expression_statement; 63, expression_statement; 64, block; 65, pair; 66, pair; 67, pair; 68, pair; 69, pair; 70, pair; 71, pair; 72, pair; 73, identifier:min; 74, argument_list; 75, identifier:threads; 76, identifier:filethreads; 77, as_pattern; 78, assignment; 79, assignment; 80, string:"readIDs"; 81, identifier:datadf; 82, attribute; 83, argument_list; 84, call; 85, identifier:calculate_start_time; 86, argument_list; 87, identifier:logging; 88, identifier:info; 89, call; 90, identifier:len; 91, argument_list; 92, call; 93, call; 94, return_statement; 95, string; 96, attribute; 97, string; 98, attribute; 99, string; 100, attribute; 101, string; 102, attribute; 103, string; 104, attribute; 105, string; 106, attribute; 107, string; 108, attribute; 109, string; 110, attribute; 111, call; 112, identifier:threads; 113, call; 114, as_pattern_target; 115, identifier:extration_function; 116, call; 117, identifier:datadf; 118, call; 119, call; 120, identifier:any; 121, attribute; 122, argument_list; 123, identifier:datadf; 124, attribute; 125, argument_list; 126, identifier:datadf; 127, attribute; 128, argument_list; 129, attribute; 130, argument_list; 131, identifier:datadf; 132, string_content:fastq; 133, identifier:ex; 134, identifier:process_fastq_plain; 135, string_content:fasta; 136, identifier:ex; 137, identifier:process_fasta; 138, string_content:bam; 139, identifier:ex; 140, identifier:process_bam; 141, string_content:summary; 142, identifier:ex; 143, identifier:process_summary; 144, string_content:fastq_rich; 145, identifier:ex; 146, identifier:process_fastq_rich; 147, string_content:fastq_minimal; 148, identifier:ex; 149, identifier:process_fastq_minimal; 150, string_content:cram; 151, identifier:ex; 152, identifier:process_cram; 153, string_content:ubam; 154, identifier:ex; 155, identifier:process_ubam; 156, identifier:len; 157, argument_list; 158, attribute; 159, argument_list; 160, identifier:executor; 161, identifier:partial; 162, argument_list; 163, identifier:combine_dfs; 164, argument_list; 165, attribute; 166, argument_list; 167, identifier:datadf; 168, identifier:drop; 169, string:"readIDs"; 170, keyword_argument; 171, keyword_argument; 172, string:"Nanoget: Gathered all metrics of {} reads"; 173, identifier:format; 174, call; 175, identifier:logging; 176, identifier:critical; 177, call; 178, identifier:sys; 179, identifier:exit; 180, string:"Fatal: No reads found in input."; 181, identifier:files; 182, identifier:cfutures; 183, identifier:ProcessPoolExecutor; 184, keyword_argument; 185, subscript; 186, keyword_argument; 187, keyword_argument; 188, keyword_argument; 189, keyword_argument; 190, keyword_argument; 191, keyword_argument; 192, identifier:pd; 193, identifier:isna; 194, subscript; 195, identifier:axis; 196, string; 197, identifier:inplace; 198, True; 199, identifier:len; 200, argument_list; 201, attribute; 202, argument_list; 203, identifier:max_workers; 204, 
identifier:filethreads; 205, identifier:proc_functions; 206, identifier:source; 207, identifier:threads; 208, identifier:threadsleft; 209, identifier:readtype; 210, identifier:readtype; 211, identifier:barcoded; 212, identifier:barcoded; 213, identifier:dfs; 214, list_comprehension; 215, identifier:names; 216, boolean_operator; 217, identifier:method; 218, identifier:combine; 219, identifier:datadf; 220, string:"readIDs"; 221, string_content:columns; 222, identifier:datadf; 223, string:"Nanoget: no reads retrieved."; 224, identifier:format; 225, call; 226, identifier:out; 227, for_in_clause; 228, identifier:names; 229, identifier:files; 230, identifier:len; 231, argument_list; 232, identifier:out; 233, call; 234, identifier:datadf; 235, attribute; 236, argument_list; 237, identifier:executor; 238, identifier:map; 239, identifier:extration_function; 240, identifier:files | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 7, 21; 7, 22; 8, 23; 8, 24; 9, 25; 9, 26; 10, 27; 10, 28; 11, 29; 11, 30; 12, 31; 13, 32; 14, 33; 15, 34; 16, 35; 16, 36; 17, 37; 17, 38; 18, 39; 19, 40; 20, 41; 20, 42; 20, 43; 32, 44; 32, 45; 33, 46; 33, 47; 34, 48; 34, 49; 35, 50; 36, 51; 36, 52; 37, 53; 37, 54; 38, 55; 39, 56; 39, 57; 40, 58; 40, 59; 41, 60; 41, 61; 42, 62; 42, 63; 43, 64; 45, 65; 45, 66; 45, 67; 45, 68; 45, 69; 45, 70; 45, 71; 45, 72; 47, 73; 47, 74; 49, 75; 49, 76; 50, 77; 51, 78; 52, 79; 53, 80; 53, 81; 54, 82; 54, 83; 55, 84; 57, 85; 57, 86; 58, 87; 58, 88; 59, 89; 60, 90; 60, 91; 62, 92; 63, 93; 64, 94; 65, 95; 65, 96; 66, 97; 66, 98; 67, 99; 67, 100; 68, 101; 68, 102; 69, 103; 69, 104; 70, 105; 70, 106; 71, 107; 71, 108; 72, 109; 72, 110; 74, 111; 74, 112; 77, 113; 77, 114; 78, 115; 78, 116; 79, 117; 79, 118; 82, 119; 82, 120; 84, 121; 84, 122; 86, 123; 89, 124; 89, 125; 91, 126; 92, 127; 92, 128; 93, 129; 93, 130; 94, 131; 95, 132; 96, 133; 96, 134; 97, 135; 98, 136; 98, 137; 99, 138; 100, 139; 100, 140; 101, 141; 102, 142; 102, 143; 103, 144; 104, 145; 104, 146; 105, 147; 106, 148; 106, 149; 107, 150; 108, 151; 108, 152; 109, 153; 110, 154; 110, 155; 111, 156; 111, 157; 113, 158; 113, 159; 114, 160; 116, 161; 116, 162; 118, 163; 118, 164; 119, 165; 119, 166; 121, 167; 121, 168; 122, 169; 122, 170; 122, 171; 124, 172; 124, 173; 125, 174; 127, 175; 127, 176; 128, 177; 129, 178; 129, 179; 130, 180; 157, 181; 158, 182; 158, 183; 159, 184; 162, 185; 162, 186; 162, 187; 162, 188; 164, 189; 164, 190; 164, 191; 165, 192; 165, 193; 166, 194; 170, 195; 170, 196; 171, 197; 171, 198; 174, 199; 174, 200; 177, 201; 177, 202; 184, 203; 184, 204; 185, 205; 185, 206; 186, 207; 186, 208; 187, 209; 187, 210; 188, 211; 188, 212; 189, 213; 189, 214; 190, 215; 190, 216; 191, 217; 191, 218; 194, 219; 194, 220; 196, 221; 200, 222; 201, 223; 201, 224; 202, 225; 214, 226; 214, 227; 216, 228; 216, 229; 225, 230; 225, 231; 227, 232; 227, 233; 231, 234; 233, 235; 233, 236; 235, 237; 235, 238; 236, 239; 236, 240 | def get_input(source, files, threads=4, readtype="1D",
combine="simple", names=None, barcoded=False):
"""Get input and process accordingly.
Data can be:
- an uncompressed, bgzip, bzip2 or gzip compressed fastq file
- an uncompressed, bgzip, bzip2 or gzip compressed fasta file
- a rich fastq containing additional key=value information in the description,
as produced by MinKNOW and albacore with the same compression options as above
- a sorted bam file
- a sorted cram file
- a (compressed) sequencing_summary.txt file generated by albacore
The handle is passed to the proper function to get a DataFrame with metrics.
Multiple files of the same type can be used to extract info from; this is done in parallel.
Arguments:
- source: defines the input data type and the function that needs to be called
- files: a list of one or more files to operate on, of the type given by <source>
- threads: the number of workers that can be used
- readtype: (only relevant for summary input) specifies which columns have to be extracted
- combine: is either 'simple' or 'track', with the difference that with 'track' an additional
field is created with the name of the dataset
- names: if combine="track", the names to be used for the datasets. Needs to have same length as
files, or None
"""
proc_functions = {
'fastq': ex.process_fastq_plain,
'fasta': ex.process_fasta,
'bam': ex.process_bam,
'summary': ex.process_summary,
'fastq_rich': ex.process_fastq_rich,
'fastq_minimal': ex.process_fastq_minimal,
'cram': ex.process_cram,
'ubam': ex.process_ubam, }
filethreads = min(len(files), threads)
threadsleft = threads - filethreads
with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor:
extraction_function = partial(proc_functions[source],
threads=threadsleft,
readtype=readtype,
barcoded=barcoded)
datadf = combine_dfs(
dfs=[out for out in executor.map(extraction_function, files)],
names=names or files,
method=combine)
if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any():
datadf.drop("readIDs", axis='columns', inplace=True)
datadf = calculate_start_time(datadf)
logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
if len(datadf) == 0:
logging.critical("Nanoget: no reads retrieved.".format(len(datadf)))
sys.exit("Fatal: No reads found in input.")
else:
return datadf |
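A hedged invocation sketch; the file names are placeholders and the exact DataFrame columns depend on the chosen source.

# Read two bam files in parallel and tag each read with its dataset name.
df = get_input(source="bam",
               files=["run1.bam", "run2.bam"],   # placeholder paths
               threads=8,
               combine="track",
               names=["run1", "run2"])
print(df.head())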
0, module; 1, function_definition; 2, function_name:validate_wavelengths; 3, parameters; 4, block; 5, identifier:wavelengths; 6, expression_statement; 7, if_statement; 8, if_statement; 9, expression_statement; 10, comment:# Check for zeroes; 11, if_statement; 12, comment:# Check for monotonicity; 13, expression_statement; 14, if_statement; 15, comment:# Check for duplicate values; 16, if_statement; 17, comment:"""Check wavelengths for ``synphot`` compatibility.
Wavelengths must satisfy these conditions:
* valid unit type, if given
* no zeroes
* monotonic ascending or descending
* no duplicate values
Parameters
----------
wavelengths : array-like or `~astropy.units.quantity.Quantity`
Wavelength values.
Raises
------
synphot.exceptions.SynphotError
Wavelengths unit type is invalid.
synphot.exceptions.DuplicateWavelength
Wavelength array contains duplicate entries.
synphot.exceptions.UnsortedWavelength
Wavelength array is not monotonic.
synphot.exceptions.ZeroWavelength
Negative or zero wavelength occurs in wavelength array.
"""; 18, call; 19, block; 20, else_clause; 21, call; 22, block; 23, assignment; 24, call; 25, block; 26, assignment; 27, not_operator; 28, block; 29, comparison_operator:wave.size > 1; 30, block; 31, identifier:isinstance; 32, argument_list; 33, expression_statement; 34, expression_statement; 35, block; 36, attribute; 37, argument_list; 38, expression_statement; 39, identifier:wave; 40, call; 41, attribute; 42, argument_list; 43, raise_statement; 44, identifier:sorted_wave; 45, call; 46, call; 47, if_statement; 48, attribute; 49, integer:1; 50, expression_statement; 51, if_statement; 52, identifier:wavelengths; 53, attribute; 54, call; 55, assignment; 56, expression_statement; 57, identifier:np; 58, identifier:isscalar; 59, identifier:wave; 60, assignment; 61, attribute; 62, argument_list; 63, identifier:np; 64, identifier:any; 65, comparison_operator:wave <= 0; 66, call; 67, attribute; 68, argument_list; 69, attribute; 70, argument_list; 71, call; 72, block; 73, else_clause; 74, identifier:wave; 75, identifier:size; 76, assignment; 77, call; 78, block; 79, identifier:u; 80, identifier:Quantity; 81, attribute; 82, argument_list; 83, identifier:wave; 84, attribute; 85, assignment; 86, identifier:wave; 87, list; 88, identifier:np; 89, identifier:asarray; 90, identifier:wave; 91, identifier:wave; 92, integer:0; 93, attribute; 94, argument_list; 95, identifier:np; 96, identifier:sort; 97, identifier:wave; 98, identifier:np; 99, identifier:alltrue; 100, comparison_operator:sorted_wave == wave; 101, attribute; 102, argument_list; 103, pass_statement; 104, comment:# Monotonic descending is allowed; 105, block; 106, identifier:dw; 107, binary_operator:sorted_wave[1:] - sorted_wave[:-1]; 108, attribute; 109, argument_list; 110, raise_statement; 111, identifier:units; 112, identifier:validate_wave_unit; 113, attribute; 114, identifier:wavelengths; 115, identifier:value; 116, identifier:wave; 117, identifier:wavelengths; 118, identifier:wave; 119, identifier:exceptions; 120, identifier:ZeroWavelength; 121, string; 122, keyword_argument; 123, identifier:sorted_wave; 124, identifier:wave; 125, identifier:np; 126, identifier:alltrue; 127, comparison_operator:sorted_wave[::-1] == wave; 128, raise_statement; 129, subscript; 130, subscript; 131, identifier:np; 132, identifier:any; 133, comparison_operator:dw == 0; 134, call; 135, identifier:wavelengths; 136, identifier:unit; 137, string_content:Negative or zero wavelength occurs in wavelength array; 138, identifier:rows; 139, subscript; 140, subscript; 141, identifier:wave; 142, call; 143, identifier:sorted_wave; 144, slice; 145, identifier:sorted_wave; 146, slice; 147, identifier:dw; 148, integer:0; 149, attribute; 150, argument_list; 151, call; 152, integer:0; 153, identifier:sorted_wave; 154, slice; 155, attribute; 156, argument_list; 157, integer:1; 158, unary_operator; 159, identifier:exceptions; 160, identifier:DuplicateWavelength; 161, string; 162, keyword_argument; 163, attribute; 164, argument_list; 165, unary_operator; 166, identifier:exceptions; 167, identifier:UnsortedWavelength; 168, string; 169, keyword_argument; 170, integer:1; 171, string_content:Wavelength array contains duplicate entries; 172, identifier:rows; 173, subscript; 174, identifier:np; 175, identifier:where; 176, comparison_operator:wave <= 0; 177, integer:1; 178, string_content:Wavelength array is not monotonic; 179, identifier:rows; 180, subscript; 181, call; 182, integer:0; 183, identifier:wave; 184, integer:0; 185, call; 186, integer:0; 187, attribute; 188, argument_list; 
189, attribute; 190, argument_list; 191, identifier:np; 192, identifier:where; 193, comparison_operator:dw == 0; 194, identifier:np; 195, identifier:where; 196, comparison_operator:sorted_wave != wave; 197, identifier:dw; 198, integer:0; 199, identifier:sorted_wave; 200, identifier:wave | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 6, 17; 7, 18; 7, 19; 7, 20; 8, 21; 8, 22; 9, 23; 11, 24; 11, 25; 13, 26; 14, 27; 14, 28; 16, 29; 16, 30; 18, 31; 18, 32; 19, 33; 19, 34; 20, 35; 21, 36; 21, 37; 22, 38; 23, 39; 23, 40; 24, 41; 24, 42; 25, 43; 26, 44; 26, 45; 27, 46; 28, 47; 29, 48; 29, 49; 30, 50; 30, 51; 32, 52; 32, 53; 33, 54; 34, 55; 35, 56; 36, 57; 36, 58; 37, 59; 38, 60; 40, 61; 40, 62; 41, 63; 41, 64; 42, 65; 43, 66; 45, 67; 45, 68; 46, 69; 46, 70; 47, 71; 47, 72; 47, 73; 48, 74; 48, 75; 50, 76; 51, 77; 51, 78; 53, 79; 53, 80; 54, 81; 54, 82; 55, 83; 55, 84; 56, 85; 60, 86; 60, 87; 61, 88; 61, 89; 62, 90; 65, 91; 65, 92; 66, 93; 66, 94; 67, 95; 67, 96; 68, 97; 69, 98; 69, 99; 70, 100; 71, 101; 71, 102; 72, 103; 72, 104; 73, 105; 76, 106; 76, 107; 77, 108; 77, 109; 78, 110; 81, 111; 81, 112; 82, 113; 84, 114; 84, 115; 85, 116; 85, 117; 87, 118; 93, 119; 93, 120; 94, 121; 94, 122; 100, 123; 100, 124; 101, 125; 101, 126; 102, 127; 105, 128; 107, 129; 107, 130; 108, 131; 108, 132; 109, 133; 110, 134; 113, 135; 113, 136; 121, 137; 122, 138; 122, 139; 127, 140; 127, 141; 128, 142; 129, 143; 129, 144; 130, 145; 130, 146; 133, 147; 133, 148; 134, 149; 134, 150; 139, 151; 139, 152; 140, 153; 140, 154; 142, 155; 142, 156; 144, 157; 146, 158; 149, 159; 149, 160; 150, 161; 150, 162; 151, 163; 151, 164; 154, 165; 155, 166; 155, 167; 156, 168; 156, 169; 158, 170; 161, 171; 162, 172; 162, 173; 163, 174; 163, 175; 164, 176; 165, 177; 168, 178; 169, 179; 169, 180; 173, 181; 173, 182; 176, 183; 176, 184; 180, 185; 180, 186; 181, 187; 181, 188; 185, 189; 185, 190; 187, 191; 187, 192; 188, 193; 189, 194; 189, 195; 190, 196; 193, 197; 193, 198; 196, 199; 196, 200 | def validate_wavelengths(wavelengths):
"""Check wavelengths for ``synphot`` compatibility.
Wavelengths must satisfy these conditions:
* valid unit type, if given
* no zeroes
* monotonic ascending or descending
* no duplicate values
Parameters
----------
wavelengths : array-like or `~astropy.units.quantity.Quantity`
Wavelength values.
Raises
------
synphot.exceptions.SynphotError
Wavelengths unit type is invalid.
synphot.exceptions.DuplicateWavelength
Wavelength array contains duplicate entries.
synphot.exceptions.UnsortedWavelength
Wavelength array is not monotonic.
synphot.exceptions.ZeroWavelength
Negative or zero wavelength occurs in wavelength array.
"""
if isinstance(wavelengths, u.Quantity):
units.validate_wave_unit(wavelengths.unit)
wave = wavelengths.value
else:
wave = wavelengths
if np.isscalar(wave):
wave = [wave]
wave = np.asarray(wave)
# Check for zeroes
if np.any(wave <= 0):
raise exceptions.ZeroWavelength(
'Negative or zero wavelength occurs in wavelength array',
rows=np.where(wave <= 0)[0])
# Check for monotonicity
sorted_wave = np.sort(wave)
if not np.alltrue(sorted_wave == wave):
if np.alltrue(sorted_wave[::-1] == wave):
pass # Monotonic descending is allowed
else:
raise exceptions.UnsortedWavelength(
'Wavelength array is not monotonic',
rows=np.where(sorted_wave != wave)[0])
# Check for duplicate values
if wave.size > 1:
dw = sorted_wave[1:] - sorted_wave[:-1]
if np.any(dw == 0):
raise exceptions.DuplicateWavelength(
'Wavelength array contains duplicate entries',
rows=np.where(dw == 0)[0]) |
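An illustrative check of the behaviours listed in the docstring, reusing the module-level exceptions name the function already references.

validate_wavelengths([1000.0, 2000.0, 3000.0])      # ascending: passes silently
validate_wavelengths([3000.0, 2000.0, 1000.0])      # descending is also allowed
try:
    validate_wavelengths([1000.0, 1000.0, 2000.0])  # duplicate entry
except exceptions.DuplicateWavelength as err:
    print("rejected:", err)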
0, module; 1, function_definition; 2, function_name:heapify; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, expression_statement; 8, if_statement; 9, identifier:key; 10, identifier:__marker; 11, comment:"""
Repair a broken heap. If the state of an item's priority value changes
you can re-sort the relevant item only by providing ``key``.
"""; 12, comparison_operator:key is self.__marker; 13, block; 14, else_clause; 15, identifier:key; 16, attribute; 17, expression_statement; 18, for_statement; 19, block; 20, identifier:self; 21, identifier:__marker; 22, assignment; 23, identifier:pos; 24, call; 25, block; 26, try_statement; 27, expression_statement; 28, identifier:n; 29, call; 30, identifier:reversed; 31, argument_list; 32, expression_statement; 33, block; 34, except_clause; 35, call; 36, identifier:len; 37, argument_list; 38, call; 39, call; 40, expression_statement; 41, identifier:KeyError; 42, block; 43, attribute; 44, argument_list; 45, attribute; 46, identifier:range; 47, argument_list; 48, attribute; 49, argument_list; 50, assignment; 51, raise_statement; 52, identifier:self; 53, identifier:_reheapify; 54, identifier:pos; 55, identifier:self; 56, identifier:_heap; 57, binary_operator:n//2; 58, identifier:self; 59, identifier:_sink; 60, identifier:pos; 61, identifier:pos; 62, subscript; 63, call; 64, identifier:n; 65, integer:2; 66, attribute; 67, identifier:key; 68, identifier:KeyError; 69, argument_list; 70, identifier:self; 71, identifier:_position; 72, identifier:key | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 6, 9; 6, 10; 7, 11; 8, 12; 8, 13; 8, 14; 12, 15; 12, 16; 13, 17; 13, 18; 14, 19; 16, 20; 16, 21; 17, 22; 18, 23; 18, 24; 18, 25; 19, 26; 19, 27; 22, 28; 22, 29; 24, 30; 24, 31; 25, 32; 26, 33; 26, 34; 27, 35; 29, 36; 29, 37; 31, 38; 32, 39; 33, 40; 34, 41; 34, 42; 35, 43; 35, 44; 37, 45; 38, 46; 38, 47; 39, 48; 39, 49; 40, 50; 42, 51; 43, 52; 43, 53; 44, 54; 45, 55; 45, 56; 47, 57; 48, 58; 48, 59; 49, 60; 50, 61; 50, 62; 51, 63; 57, 64; 57, 65; 62, 66; 62, 67; 63, 68; 63, 69; 66, 70; 66, 71; 69, 72 | def heapify(self, key=__marker):
"""
Repair a broken heap. If the state of an item's priority value changes
you can re-sort the relevant item only by providing ``key``.
"""
if key is self.__marker:
n = len(self._heap)
for pos in reversed(range(n//2)):
self._sink(pos)
else:
try:
pos = self._position[key]
except KeyError:
raise KeyError(key)
self._reheapify(pos) |
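The no-key branch rebuilds the heap bottom-up by sinking every internal node. Below is a standalone sketch of that pattern on a plain list with a stand-in _sink; it is not the class's own method, only an illustration of the reversed(range(n//2)) loop.

def _sink(heap, pos):
    # Push heap[pos] down until the min-heap property holds below it.
    n = len(heap)
    while True:
        child = 2 * pos + 1
        if child >= n:
            return
        if child + 1 < n and heap[child + 1] < heap[child]:
            child += 1
        if heap[pos] <= heap[child]:
            return
        heap[pos], heap[child] = heap[child], heap[pos]
        pos = child

heap = [9, 4, 7, 1, 3, 8]
for pos in reversed(range(len(heap) // 2)):   # same traversal as in heapify()
    _sink(heap, pos)
print(heap[0])   # 1 -- heap order restored in O(n)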
0, module; 1, function_definition; 2, function_name:__update_display_items_model; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, typed_parameter; 8, typed_parameter; 9, typed_parameter; 10, None; 11, expression_statement; 12, with_statement; 13, identifier:display_items_model; 14, type; 15, identifier:data_group; 16, type; 17, identifier:filter_id; 18, type; 19, comment:"""Update the data item model with a new container, filter, and sorting.
This is called when the data item model is created or when the user changes
the data group or sorting settings.
"""; 20, with_clause; 21, comment:# change filter and sort together; 22, block; 23, attribute; 24, subscript; 25, subscript; 26, with_item; 27, if_statement; 28, identifier:ListModel; 29, identifier:FilteredListModel; 30, attribute; 31, attribute; 32, attribute; 33, identifier:str; 34, call; 35, comparison_operator:data_group is not None; 36, block; 37, elif_clause; 38, elif_clause; 39, elif_clause; 40, else_clause; 41, identifier:typing; 42, identifier:Optional; 43, identifier:DataGroup; 44, identifier:DataGroup; 45, identifier:typing; 46, identifier:Optional; 47, attribute; 48, argument_list; 49, identifier:data_group; 50, None; 51, expression_statement; 52, expression_statement; 53, expression_statement; 54, expression_statement; 55, comparison_operator:filter_id == "latest-session"; 56, block; 57, comparison_operator:filter_id == "temporary"; 58, block; 59, comparison_operator:filter_id == "none"; 60, comment:# not intended to be used directly; 61, block; 62, comment:# "all"; 63, block; 64, identifier:display_items_model; 65, identifier:changes; 66, assignment; 67, assignment; 68, assignment; 69, assignment; 70, identifier:filter_id; 71, string:"latest-session"; 72, expression_statement; 73, expression_statement; 74, expression_statement; 75, expression_statement; 76, expression_statement; 77, identifier:filter_id; 78, string:"temporary"; 79, expression_statement; 80, expression_statement; 81, expression_statement; 82, expression_statement; 83, expression_statement; 84, identifier:filter_id; 85, string:"none"; 86, expression_statement; 87, expression_statement; 88, expression_statement; 89, expression_statement; 90, expression_statement; 91, expression_statement; 92, expression_statement; 93, expression_statement; 94, expression_statement; 95, expression_statement; 96, attribute; 97, identifier:data_group; 98, attribute; 99, call; 100, attribute; 101, None; 102, attribute; 103, None; 104, assignment; 105, assignment; 106, assignment; 107, assignment; 108, assignment; 109, assignment; 110, assignment; 111, assignment; 112, assignment; 113, assignment; 114, assignment; 115, assignment; 116, assignment; 117, assignment; 118, assignment; 119, assignment; 120, assignment; 121, assignment; 122, assignment; 123, assignment; 124, identifier:display_items_model; 125, identifier:container; 126, identifier:display_items_model; 127, identifier:filter; 128, attribute; 129, argument_list; 130, identifier:display_items_model; 131, identifier:sort_key; 132, identifier:display_items_model; 133, identifier:filter_id; 134, attribute; 135, attribute; 136, attribute; 137, call; 138, attribute; 139, attribute; 140, attribute; 141, True; 142, attribute; 143, identifier:filter_id; 144, attribute; 145, attribute; 146, attribute; 147, call; 148, attribute; 149, attribute; 150, attribute; 151, True; 152, attribute; 153, identifier:filter_id; 154, attribute; 155, attribute; 156, attribute; 157, call; 158, attribute; 159, attribute; 160, attribute; 161, True; 162, attribute; 163, identifier:filter_id; 164, attribute; 165, attribute; 166, attribute; 167, call; 168, attribute; 169, attribute; 170, attribute; 171, True; 172, attribute; 173, None; 174, identifier:ListModel; 175, identifier:Filter; 176, True; 177, identifier:display_items_model; 178, identifier:container; 179, identifier:self; 180, identifier:document_model; 181, identifier:display_items_model; 182, identifier:filter; 183, attribute; 184, argument_list; 185, identifier:display_items_model; 186, identifier:sort_key; 187, identifier:DataItem; 188, 
identifier:sort_by_date_key; 189, identifier:display_items_model; 190, identifier:sort_reverse; 191, identifier:display_items_model; 192, identifier:filter_id; 193, identifier:display_items_model; 194, identifier:container; 195, identifier:self; 196, identifier:document_model; 197, identifier:display_items_model; 198, identifier:filter; 199, attribute; 200, argument_list; 201, identifier:display_items_model; 202, identifier:sort_key; 203, identifier:DataItem; 204, identifier:sort_by_date_key; 205, identifier:display_items_model; 206, identifier:sort_reverse; 207, identifier:display_items_model; 208, identifier:filter_id; 209, identifier:display_items_model; 210, identifier:container; 211, identifier:self; 212, identifier:document_model; 213, identifier:display_items_model; 214, identifier:filter; 215, attribute; 216, argument_list; 217, identifier:display_items_model; 218, identifier:sort_key; 219, identifier:DataItem; 220, identifier:sort_by_date_key; 221, identifier:display_items_model; 222, identifier:sort_reverse; 223, identifier:display_items_model; 224, identifier:filter_id; 225, identifier:display_items_model; 226, identifier:container; 227, identifier:self; 228, identifier:document_model; 229, identifier:display_items_model; 230, identifier:filter; 231, attribute; 232, argument_list; 233, identifier:display_items_model; 234, identifier:sort_key; 235, identifier:DataItem; 236, identifier:sort_by_date_key; 237, identifier:display_items_model; 238, identifier:sort_reverse; 239, identifier:display_items_model; 240, identifier:filter_id; 241, identifier:ListModel; 242, identifier:EqFilter; 243, string:"session_id"; 244, attribute; 245, identifier:ListModel; 246, identifier:NotEqFilter; 247, string:"category"; 248, string:"persistent"; 249, identifier:ListModel; 250, identifier:Filter; 251, False; 252, identifier:ListModel; 253, identifier:EqFilter; 254, string:"category"; 255, string:"persistent"; 256, attribute; 257, identifier:session_id; 258, identifier:self; 259, identifier:document_model | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 5, 11; 5, 12; 7, 13; 7, 14; 8, 15; 8, 16; 9, 17; 9, 18; 11, 19; 12, 20; 12, 21; 12, 22; 14, 23; 16, 24; 18, 25; 20, 26; 22, 27; 23, 28; 23, 29; 24, 30; 24, 31; 25, 32; 25, 33; 26, 34; 27, 35; 27, 36; 27, 37; 27, 38; 27, 39; 27, 40; 30, 41; 30, 42; 31, 43; 31, 44; 32, 45; 32, 46; 34, 47; 34, 48; 35, 49; 35, 50; 36, 51; 36, 52; 36, 53; 36, 54; 37, 55; 37, 56; 38, 57; 38, 58; 39, 59; 39, 60; 39, 61; 40, 62; 40, 63; 47, 64; 47, 65; 51, 66; 52, 67; 53, 68; 54, 69; 55, 70; 55, 71; 56, 72; 56, 73; 56, 74; 56, 75; 56, 76; 57, 77; 57, 78; 58, 79; 58, 80; 58, 81; 58, 82; 58, 83; 59, 84; 59, 85; 61, 86; 61, 87; 61, 88; 61, 89; 61, 90; 63, 91; 63, 92; 63, 93; 63, 94; 63, 95; 66, 96; 66, 97; 67, 98; 67, 99; 68, 100; 68, 101; 69, 102; 69, 103; 72, 104; 73, 105; 74, 106; 75, 107; 76, 108; 79, 109; 80, 110; 81, 111; 82, 112; 83, 113; 86, 114; 87, 115; 88, 116; 89, 117; 90, 118; 91, 119; 92, 120; 93, 121; 94, 122; 95, 123; 96, 124; 96, 125; 98, 126; 98, 127; 99, 128; 99, 129; 100, 130; 100, 131; 102, 132; 102, 133; 104, 134; 104, 135; 105, 136; 105, 137; 106, 138; 106, 139; 107, 140; 107, 141; 108, 142; 108, 143; 109, 144; 109, 145; 110, 146; 110, 147; 111, 148; 111, 149; 112, 150; 112, 151; 113, 152; 113, 153; 114, 154; 114, 155; 115, 156; 115, 157; 116, 158; 116, 159; 117, 160; 117, 161; 118, 162; 118, 163; 119, 164; 119, 165; 120, 166; 120, 167; 121, 168; 121, 169; 122, 170; 122, 171; 123, 172; 123, 173; 128, 174; 128, 175; 129, 176; 134, 177; 134, 
178; 135, 179; 135, 180; 136, 181; 136, 182; 137, 183; 137, 184; 138, 185; 138, 186; 139, 187; 139, 188; 140, 189; 140, 190; 142, 191; 142, 192; 144, 193; 144, 194; 145, 195; 145, 196; 146, 197; 146, 198; 147, 199; 147, 200; 148, 201; 148, 202; 149, 203; 149, 204; 150, 205; 150, 206; 152, 207; 152, 208; 154, 209; 154, 210; 155, 211; 155, 212; 156, 213; 156, 214; 157, 215; 157, 216; 158, 217; 158, 218; 159, 219; 159, 220; 160, 221; 160, 222; 162, 223; 162, 224; 164, 225; 164, 226; 165, 227; 165, 228; 166, 229; 166, 230; 167, 231; 167, 232; 168, 233; 168, 234; 169, 235; 169, 236; 170, 237; 170, 238; 172, 239; 172, 240; 183, 241; 183, 242; 184, 243; 184, 244; 199, 245; 199, 246; 200, 247; 200, 248; 215, 249; 215, 250; 216, 251; 231, 252; 231, 253; 232, 254; 232, 255; 244, 256; 244, 257; 256, 258; 256, 259 | def __update_display_items_model(self, display_items_model: ListModel.FilteredListModel, data_group: typing.Optional[DataGroup.DataGroup], filter_id: typing.Optional[str]) -> None:
"""Update the data item model with a new container, filter, and sorting.
This is called when the data item model is created or when the user changes
the data group or sorting settings.
"""
with display_items_model.changes(): # change filter and sort together
if data_group is not None:
display_items_model.container = data_group
display_items_model.filter = ListModel.Filter(True)
display_items_model.sort_key = None
display_items_model.filter_id = None
elif filter_id == "latest-session":
display_items_model.container = self.document_model
display_items_model.filter = ListModel.EqFilter("session_id", self.document_model.session_id)
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
elif filter_id == "temporary":
display_items_model.container = self.document_model
display_items_model.filter = ListModel.NotEqFilter("category", "persistent")
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
elif filter_id == "none": # not intended to be used directly
display_items_model.container = self.document_model
display_items_model.filter = ListModel.Filter(False)
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
else: # "all"
display_items_model.container = self.document_model
display_items_model.filter = ListModel.EqFilter("category", "persistent")
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = None |
0, module; 1, function_definition; 2, function_name:sort_by_date_key; 3, parameters; 4, block; 5, identifier:data_item; 6, expression_statement; 7, return_statement; 8, comment:""" A sort key to for the created field of a data item. The sort by uuid makes it determinate. """; 9, expression_list; 10, conditional_expression:data_item.title + str(data_item.uuid) if data_item.is_live else str(); 11, attribute; 12, call; 13, binary_operator:data_item.title + str(data_item.uuid); 14, attribute; 15, call; 16, identifier:data_item; 17, identifier:date_for_sorting; 18, identifier:str; 19, argument_list; 20, attribute; 21, call; 22, identifier:data_item; 23, identifier:is_live; 24, identifier:str; 25, argument_list; 26, attribute; 27, identifier:data_item; 28, identifier:title; 29, identifier:str; 30, argument_list; 31, identifier:data_item; 32, identifier:uuid; 33, attribute; 34, identifier:data_item; 35, identifier:uuid | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 6, 8; 7, 9; 9, 10; 9, 11; 9, 12; 10, 13; 10, 14; 10, 15; 11, 16; 11, 17; 12, 18; 12, 19; 13, 20; 13, 21; 14, 22; 14, 23; 15, 24; 15, 25; 19, 26; 20, 27; 20, 28; 21, 29; 21, 30; 26, 31; 26, 32; 30, 33; 33, 34; 33, 35 | def sort_by_date_key(data_item):
""" A sort key to for the created field of a data item. The sort by uuid makes it determinate. """
return data_item.title + str(data_item.uuid) if data_item.is_live else str(), data_item.date_for_sorting, str(data_item.uuid) |
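A one-line usage sketch; document_model is assumed to be a Nion Swift DocumentModel whose data_items are DataItem objects.

ordered = sorted(document_model.data_items, key=sort_by_date_key)  # assumed context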
0, module; 1, function_definition; 2, function_name:open_mfbpchdataset; 3, parameters; 4, block; 5, identifier:paths; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, dictionary_splat_pattern; 11, expression_statement; 12, import_from_statement; 13, comment:# TODO: Include file locks?; 14, comment:# Check for dask; 15, expression_statement; 16, if_statement; 17, expression_statement; 18, comment:# Add th; 19, if_statement; 20, if_statement; 21, expression_statement; 22, expression_statement; 23, if_statement; 24, comment:# Concatenate over time; 25, expression_statement; 26, expression_statement; 27, expression_statement; 28, expression_statement; 29, expression_statement; 30, expression_statement; 31, return_statement; 32, identifier:concat_dim; 33, string; 34, identifier:compat; 35, string; 36, identifier:preprocess; 37, None; 38, identifier:lock; 39, None; 40, identifier:kwargs; 41, comment:""" Open multiple bpch files as a single dataset.
You must have dask installed for this to work, as this greatly
simplifies issues relating to multi-file I/O.
Also, please note that this is not a very performant routine. I/O is still
limited by the fact that we need to manually scan/read through each bpch
file so that we can figure out what its contents are, since that metadata
isn't saved anywhere. So this routine will actually sequentially load
Datasets for each bpch file, then concatenate them along the "time" axis.
You may wish to simply process each file individually, coerce to NetCDF,
and then ingest through xarray as normal.
Parameters
----------
paths : list of strs
Filenames to load; order doesn't matter as they will be
lexicographically sorted before we read in the data
concat_dim : str, default='time'
Dimension to concatenate Datasets over. We default to "time" since this
is how GEOS-Chem splits output files
compat : str (optional)
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable (optional)
A pre-processing function to apply to each Dataset prior to
concatenation
lock : False, True, or threading.Lock (optional)
Passed to :py:func:`dask.array.from_array`. By default, xarray
employs a per-variable lock when reading data from NetCDF files,
but this model has not yet been extended or implemented for bpch files
and so this is not actually used. However, it is likely necessary
before dask's multi-threaded backend can be used
**kwargs : optional
Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`.
"""; 42, dotted_name; 43, dotted_name; 44, assignment; 45, not_operator; 46, block; 47, assignment; 48, call; 49, block; 50, not_operator; 51, block; 52, assignment; 53, assignment; 54, comparison_operator:preprocess is not None; 55, block; 56, assignment; 57, assignment; 58, assignment; 59, assignment; 60, assignment; 61, assignment; 62, identifier:combined; 63, string_content:time; 64, string_content:no_conflicts; 65, identifier:xarray; 66, identifier:backends; 67, identifier:api; 68, identifier:_MultiFileCloser; 69, identifier:dask; 70, call; 71, identifier:dask; 72, raise_statement; 73, subscript; 74, True; 75, identifier:isinstance; 76, argument_list; 77, expression_statement; 78, identifier:paths; 79, raise_statement; 80, identifier:datasets; 81, list_comprehension; 82, identifier:bpch_objs; 83, list_comprehension; 84, identifier:preprocess; 85, None; 86, expression_statement; 87, identifier:combined; 88, call; 89, attribute; 90, call; 91, attribute; 92, attribute; 93, identifier:ts; 94, call; 95, identifier:fns_str; 96, call; 97, subscript; 98, parenthesized_expression; 99, attribute; 100, argument_list; 101, call; 102, identifier:kwargs; 103, string; 104, identifier:paths; 105, identifier:basestring; 106, assignment; 107, call; 108, call; 109, for_in_clause; 110, attribute; 111, for_in_clause; 112, assignment; 113, attribute; 114, argument_list; 115, identifier:combined; 116, identifier:_file_obj; 117, identifier:_MultiFileCloser; 118, argument_list; 119, identifier:combined; 120, identifier:attrs; 121, subscript; 122, identifier:attrs; 123, identifier:get_timestamp; 124, argument_list; 125, attribute; 126, argument_list; 127, attribute; 128, string; 129, call; 130, identifier:kwargs; 131, identifier:pop; 132, string; 133, False; 134, identifier:ValueError; 135, argument_list; 136, string_content:dask; 137, identifier:paths; 138, call; 139, identifier:IOError; 140, argument_list; 141, identifier:open_bpchdataset; 142, argument_list; 143, identifier:filename; 144, identifier:paths; 145, identifier:ds; 146, identifier:_file_obj; 147, identifier:ds; 148, identifier:datasets; 149, identifier:datasets; 150, list_comprehension; 151, identifier:xr; 152, identifier:auto_combine; 153, identifier:datasets; 154, keyword_argument; 155, keyword_argument; 156, identifier:bpch_objs; 157, identifier:datasets; 158, integer:0; 159, string:" "; 160, identifier:join; 161, identifier:paths; 162, identifier:combined; 163, identifier:attrs; 164, string_content:history; 165, attribute; 166, argument_list; 167, string_content:dask; 168, string:"Reading multiple files without dask is not supported"; 169, identifier:sorted; 170, argument_list; 171, string:"No paths to files were passed into open_mfbpchdataset"; 172, identifier:filename; 173, dictionary_splat; 174, call; 175, for_in_clause; 176, identifier:compat; 177, identifier:compat; 178, identifier:concat_dim; 179, identifier:concat_dim; 180, string:"{}: Processed/loaded by xbpch-{} from {}"; 181, identifier:format; 182, identifier:ts; 183, identifier:ver; 184, identifier:fns_str; 185, call; 186, identifier:kwargs; 187, identifier:preprocess; 188, argument_list; 189, identifier:ds; 190, identifier:datasets; 191, identifier:glob; 192, argument_list; 193, identifier:ds; 194, identifier:paths | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 4, 30; 4, 31; 6, 32; 6, 33; 7, 34; 7, 35; 8, 36; 8, 37; 9, 38; 9, 39; 10, 
40; 11, 41; 12, 42; 12, 43; 15, 44; 16, 45; 16, 46; 17, 47; 19, 48; 19, 49; 20, 50; 20, 51; 21, 52; 22, 53; 23, 54; 23, 55; 25, 56; 26, 57; 27, 58; 28, 59; 29, 60; 30, 61; 31, 62; 33, 63; 35, 64; 42, 65; 42, 66; 42, 67; 43, 68; 44, 69; 44, 70; 45, 71; 46, 72; 47, 73; 47, 74; 48, 75; 48, 76; 49, 77; 50, 78; 51, 79; 52, 80; 52, 81; 53, 82; 53, 83; 54, 84; 54, 85; 55, 86; 56, 87; 56, 88; 57, 89; 57, 90; 58, 91; 58, 92; 59, 93; 59, 94; 60, 95; 60, 96; 61, 97; 61, 98; 70, 99; 70, 100; 72, 101; 73, 102; 73, 103; 76, 104; 76, 105; 77, 106; 79, 107; 81, 108; 81, 109; 83, 110; 83, 111; 86, 112; 88, 113; 88, 114; 89, 115; 89, 116; 90, 117; 90, 118; 91, 119; 91, 120; 92, 121; 92, 122; 94, 123; 94, 124; 96, 125; 96, 126; 97, 127; 97, 128; 98, 129; 99, 130; 99, 131; 100, 132; 100, 133; 101, 134; 101, 135; 103, 136; 106, 137; 106, 138; 107, 139; 107, 140; 108, 141; 108, 142; 109, 143; 109, 144; 110, 145; 110, 146; 111, 147; 111, 148; 112, 149; 112, 150; 113, 151; 113, 152; 114, 153; 114, 154; 114, 155; 118, 156; 121, 157; 121, 158; 125, 159; 125, 160; 126, 161; 127, 162; 127, 163; 128, 164; 129, 165; 129, 166; 132, 167; 135, 168; 138, 169; 138, 170; 140, 171; 142, 172; 142, 173; 150, 174; 150, 175; 154, 176; 154, 177; 155, 178; 155, 179; 165, 180; 165, 181; 166, 182; 166, 183; 166, 184; 170, 185; 173, 186; 174, 187; 174, 188; 175, 189; 175, 190; 185, 191; 185, 192; 188, 193; 192, 194 | def open_mfbpchdataset(paths, concat_dim='time', compat='no_conflicts',
preprocess=None, lock=None, **kwargs):
""" Open multiple bpch files as a single dataset.
You must have dask installed for this to work, as this greatly
simplifies issues relating to multi-file I/O.
Also, please note that this is not a very performant routine. I/O is still
limited by the fact that we need to manually scan/read through each bpch
file so that we can figure out what its contents are, since that metadata
isn't saved anywhere. So this routine will actually sequentially load
Datasets for each bpch file, then concatenate them along the "time" axis.
You may wish to simply process each file individually, coerce to NetCDF,
and then ingest through xarray as normal.
Parameters
----------
paths : list of strs
Filenames to load; order doesn't matter as they will be
lexicographically sorted before we read in the data
concat_dim : str, default='time'
Dimension to concatenate Datasets over. We default to "time" since this
is how GEOS-Chem splits output files
compat : str (optional)
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable (optional)
A pre-processing function to apply to each Dataset prior to
concatenation
lock : False, True, or threading.Lock (optional)
Passed to :py:func:`dask.array.from_array`. By default, xarray
employs a per-variable lock when reading data from NetCDF files,
but this model has not yet been extended or implemented for bpch files
and so this is not actually used. However, it is likely necessary
before dask's multi-threaded backend can be used
**kwargs : optional
Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`.
"""
from xarray.backends.api import _MultiFileCloser
# TODO: Include file locks?
# Check for dask
dask = kwargs.pop('dask', False)
if not dask:
raise ValueError("Reading multiple files without dask is not supported")
kwargs['dask'] = True
# Expand a glob string into a lexicographically sorted list of paths
if isinstance(paths, basestring):
paths = sorted(glob(paths))
if not paths:
raise IOError("No paths to files were passed into open_mfbpchdataset")
datasets = [open_bpchdataset(filename, **kwargs)
for filename in paths]
bpch_objs = [ds._file_obj for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
# Concatenate over time
combined = xr.auto_combine(datasets, compat=compat, concat_dim=concat_dim)
combined._file_obj = _MultiFileCloser(bpch_objs)
combined.attrs = datasets[0].attrs
ts = get_timestamp()
fns_str = " ".join(paths)
combined.attrs['history'] = (
"{}: Processed/loaded by xbpch-{} from {}"
.format(ts, ver, fns_str)
)
return combined |
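A hypothetical invocation; the glob pattern and the tracerinfo/diaginfo paths are placeholders, and the extra keyword arguments are simply forwarded to open_bpchdataset.

ds = open_mfbpchdataset(
    "output/ts.*.bpch",               # glob string is expanded and sorted internally
    tracerinfo_file="tracerinfo.dat", # placeholder metadata files
    diaginfo_file="diaginfo.dat",
    dask=True,                        # required: multi-file reads need dask
)
print(ds)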
0, module; 1, function_definition; 2, function_name:_get_archive_filelist; 3, parameters; 4, comment:# type: (str) -> List[str]; 5, block; 6, identifier:filename; 7, expression_statement; 8, expression_statement; 9, comment:# type: List[str]; 10, if_statement; 11, if_statement; 12, return_statement; 13, comment:"""Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2)
"""; 14, assignment; 15, call; 16, block; 17, elif_clause; 18, else_clause; 19, comparison_operator:"./" in names; 20, block; 21, identifier:names; 22, identifier:names; 23, list; 24, attribute; 25, argument_list; 26, with_statement; 27, call; 28, block; 29, block; 30, string:"./"; 31, identifier:names; 32, expression_statement; 33, identifier:tarfile; 34, identifier:is_tarfile; 35, identifier:filename; 36, with_clause; 37, block; 38, attribute; 39, argument_list; 40, with_statement; 41, raise_statement; 42, call; 43, with_item; 44, expression_statement; 45, identifier:zipfile; 46, identifier:is_zipfile; 47, identifier:filename; 48, with_clause; 49, block; 50, call; 51, attribute; 52, argument_list; 53, as_pattern; 54, assignment; 55, with_item; 56, expression_statement; 57, identifier:ValueError; 58, argument_list; 59, identifier:names; 60, identifier:remove; 61, string:"./"; 62, call; 63, as_pattern_target; 64, identifier:names; 65, call; 66, as_pattern; 67, assignment; 68, call; 69, attribute; 70, argument_list; 71, identifier:tar_file; 72, identifier:sorted; 73, argument_list; 74, call; 75, as_pattern_target; 76, identifier:names; 77, call; 78, attribute; 79, argument_list; 80, identifier:tarfile; 81, identifier:open; 82, identifier:filename; 83, call; 84, attribute; 85, argument_list; 86, identifier:zip_file; 87, identifier:sorted; 88, argument_list; 89, concatenated_string; 90, identifier:format; 91, identifier:filename; 92, attribute; 93, argument_list; 94, identifier:zipfile; 95, identifier:ZipFile; 96, identifier:filename; 97, call; 98, string:"Can not get filenames from '{!s}'. "; 99, string:"Not a tar or zip file"; 100, identifier:tar_file; 101, identifier:getnames; 102, attribute; 103, argument_list; 104, identifier:zip_file; 105, identifier:namelist | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 5, 7; 5, 8; 5, 9; 5, 10; 5, 11; 5, 12; 7, 13; 8, 14; 10, 15; 10, 16; 10, 17; 10, 18; 11, 19; 11, 20; 12, 21; 14, 22; 14, 23; 15, 24; 15, 25; 16, 26; 17, 27; 17, 28; 18, 29; 19, 30; 19, 31; 20, 32; 24, 33; 24, 34; 25, 35; 26, 36; 26, 37; 27, 38; 27, 39; 28, 40; 29, 41; 32, 42; 36, 43; 37, 44; 38, 45; 38, 46; 39, 47; 40, 48; 40, 49; 41, 50; 42, 51; 42, 52; 43, 53; 44, 54; 48, 55; 49, 56; 50, 57; 50, 58; 51, 59; 51, 60; 52, 61; 53, 62; 53, 63; 54, 64; 54, 65; 55, 66; 56, 67; 58, 68; 62, 69; 62, 70; 63, 71; 65, 72; 65, 73; 66, 74; 66, 75; 67, 76; 67, 77; 68, 78; 68, 79; 69, 80; 69, 81; 70, 82; 73, 83; 74, 84; 74, 85; 75, 86; 77, 87; 77, 88; 78, 89; 78, 90; 79, 91; 83, 92; 83, 93; 84, 94; 84, 95; 85, 96; 88, 97; 89, 98; 89, 99; 92, 100; 92, 101; 97, 102; 97, 103; 102, 104; 102, 105 | def _get_archive_filelist(filename):
# type: (str) -> List[str]
"""Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2)
"""
names = [] # type: List[str]
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as tar_file:
names = sorted(tar_file.getnames())
elif zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename) as zip_file:
names = sorted(zip_file.namelist())
else:
raise ValueError("Can not get filenames from '{!s}'. "
"Not a tar or zip file".format(filename))
if "./" in names:
names.remove("./")
return names |
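A self-contained sketch that builds a tiny tar archive in a temporary directory and lists it with the function above.

import os
import tarfile
import tempfile

tmpdir = tempfile.mkdtemp()
member = os.path.join(tmpdir, "hello.txt")
with open(member, "w") as fh:
    fh.write("hi\n")
archive = os.path.join(tmpdir, "demo.tar")
with tarfile.open(archive, "w") as tar:
    tar.add(member, arcname="hello.txt")
print(_get_archive_filelist(archive))   # ['hello.txt']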
0, module; 1, function_definition; 2, function_name:pbar_strings; 3, parameters; 4, block; 5, identifier:files; 6, default_parameter; 7, dictionary_splat_pattern; 8, expression_statement; 9, return_statement; 10, identifier:desc; 11, string; 12, identifier:kwargs; 13, comment:"""Wrapper for `tqdm` progress bar which also sorts list of strings
"""; 14, call; 15, identifier:tqdm; 16, argument_list; 17, call; 18, keyword_argument; 19, keyword_argument; 20, dictionary_splat; 21, identifier:sorted; 22, argument_list; 23, identifier:desc; 24, parenthesized_expression; 25, identifier:dynamic_ncols; 26, True; 27, identifier:kwargs; 28, identifier:files; 29, keyword_argument; 30, binary_operator:'<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + '> ' +
desc; 31, identifier:key; 32, lambda; 33, binary_operator:'<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + '> '; 34, identifier:desc; 35, lambda_parameters; 36, call; 37, binary_operator:'<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")); 38, string; 39, identifier:s; 40, attribute; 41, argument_list; 42, string; 43, call; 44, string_content:>; 45, identifier:s; 46, identifier:lower; 47, string_content:<; 48, identifier:str; 49, argument_list; 50, call; 51, attribute; 52, argument_list; 53, call; 54, identifier:strftime; 55, string:"%Y-%m-%d %H:%M:%S"; 56, attribute; 57, argument_list; 58, identifier:datetime; 59, identifier:now | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 6, 10; 6, 11; 7, 12; 8, 13; 9, 14; 14, 15; 14, 16; 16, 17; 16, 18; 16, 19; 16, 20; 17, 21; 17, 22; 18, 23; 18, 24; 19, 25; 19, 26; 20, 27; 22, 28; 22, 29; 24, 30; 29, 31; 29, 32; 30, 33; 30, 34; 32, 35; 32, 36; 33, 37; 33, 38; 35, 39; 36, 40; 36, 41; 37, 42; 37, 43; 38, 44; 40, 45; 40, 46; 42, 47; 43, 48; 43, 49; 49, 50; 50, 51; 50, 52; 51, 53; 51, 54; 52, 55; 53, 56; 53, 57; 56, 58; 56, 59 | def pbar_strings(files, desc='', **kwargs):
"""Wrapper for `tqdm` progress bar which also sorts list of strings
"""
return tqdm(
sorted(files, key=lambda s: s.lower()),
desc=('<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + '> ' +
desc),
dynamic_ncols=True,
**kwargs) |
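An illustrative call: the bar iterates the file names in case-insensitive sorted order with a timestamped description.

for fname in pbar_strings(["B.txt", "a.txt", "C.txt"], desc="processing"):
    pass   # yields 'a.txt', 'B.txt', 'C.txt' in that order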
0, module; 1, function_definition; 2, function_name:sort_func; 3, parameters; 4, block; 5, identifier:self; 6, identifier:key; 7, expression_statement; 8, if_statement; 9, if_statement; 10, if_statement; 11, return_statement; 12, comment:"""Logic for sorting keys in a `Spectrum` relative to one another."""; 13, comparison_operator:key == self._KEYS.TIME; 14, block; 15, comparison_operator:key == self._KEYS.DATA; 16, block; 17, comparison_operator:key == self._KEYS.SOURCE; 18, block; 19, identifier:key; 20, identifier:key; 21, attribute; 22, return_statement; 23, identifier:key; 24, attribute; 25, return_statement; 26, identifier:key; 27, attribute; 28, return_statement; 29, attribute; 30, identifier:TIME; 31, string; 32, attribute; 33, identifier:DATA; 34, string; 35, attribute; 36, identifier:SOURCE; 37, string; 38, identifier:self; 39, identifier:_KEYS; 40, string_content:aaa; 41, identifier:self; 42, identifier:_KEYS; 43, string_content:zzy; 44, identifier:self; 45, identifier:_KEYS; 46, string_content:zzz | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 8, 13; 8, 14; 9, 15; 9, 16; 10, 17; 10, 18; 11, 19; 13, 20; 13, 21; 14, 22; 15, 23; 15, 24; 16, 25; 17, 26; 17, 27; 18, 28; 21, 29; 21, 30; 22, 31; 24, 32; 24, 33; 25, 34; 27, 35; 27, 36; 28, 37; 29, 38; 29, 39; 31, 40; 32, 41; 32, 42; 34, 43; 35, 44; 35, 45; 37, 46 | def sort_func(self, key):
"""Logic for sorting keys in a `Spectrum` relative to one another."""
if key == self._KEYS.TIME:
return 'aaa'
if key == self._KEYS.DATA:
return 'zzy'
if key == self._KEYS.SOURCE:
return 'zzz'
return key |
0, module; 1, function_definition; 2, function_name:sort_func; 3, parameters; 4, block; 5, identifier:self; 6, identifier:key; 7, expression_statement; 8, if_statement; 9, if_statement; 10, return_statement; 11, comment:"""Sorting logic for `Quantity` objects."""; 12, comparison_operator:key == self._KEYS.VALUE; 13, block; 14, comparison_operator:key == self._KEYS.SOURCE; 15, block; 16, identifier:key; 17, identifier:key; 18, attribute; 19, return_statement; 20, identifier:key; 21, attribute; 22, return_statement; 23, attribute; 24, identifier:VALUE; 25, string; 26, attribute; 27, identifier:SOURCE; 28, string; 29, identifier:self; 30, identifier:_KEYS; 31, string_content:aaa; 32, identifier:self; 33, identifier:_KEYS; 34, string_content:zzz | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 8, 13; 9, 14; 9, 15; 10, 16; 12, 17; 12, 18; 13, 19; 14, 20; 14, 21; 15, 22; 18, 23; 18, 24; 19, 25; 21, 26; 21, 27; 22, 28; 23, 29; 23, 30; 25, 31; 26, 32; 26, 33; 28, 34 | def sort_func(self, key):
"""Sorting logic for `Quantity` objects."""
if key == self._KEYS.VALUE:
return 'aaa'
if key == self._KEYS.SOURCE:
return 'zzz'
return key |
0, module; 1, function_definition; 2, function_name:sort_func; 3, parameters; 4, block; 5, identifier:self; 6, identifier:key; 7, expression_statement; 8, if_statement; 9, if_statement; 10, if_statement; 11, if_statement; 12, if_statement; 13, if_statement; 14, if_statement; 15, return_statement; 16, comment:"""Used to sort keys when writing Entry to JSON format.
Should be supplemented/overridden by inheriting classes.
"""; 17, comparison_operator:key == self._KEYS.SCHEMA; 18, block; 19, comparison_operator:key == self._KEYS.NAME; 20, block; 21, comparison_operator:key == self._KEYS.SOURCES; 22, block; 23, comparison_operator:key == self._KEYS.ALIAS; 24, block; 25, comparison_operator:key == self._KEYS.MODELS; 26, block; 27, comparison_operator:key == self._KEYS.PHOTOMETRY; 28, block; 29, comparison_operator:key == self._KEYS.SPECTRA; 30, block; 31, identifier:key; 32, identifier:key; 33, attribute; 34, return_statement; 35, identifier:key; 36, attribute; 37, return_statement; 38, identifier:key; 39, attribute; 40, return_statement; 41, identifier:key; 42, attribute; 43, return_statement; 44, identifier:key; 45, attribute; 46, return_statement; 47, identifier:key; 48, attribute; 49, return_statement; 50, identifier:key; 51, attribute; 52, return_statement; 53, attribute; 54, identifier:SCHEMA; 55, string; 56, attribute; 57, identifier:NAME; 58, string; 59, attribute; 60, identifier:SOURCES; 61, string; 62, attribute; 63, identifier:ALIAS; 64, string; 65, attribute; 66, identifier:MODELS; 67, string; 68, attribute; 69, identifier:PHOTOMETRY; 70, string; 71, attribute; 72, identifier:SPECTRA; 73, string; 74, identifier:self; 75, identifier:_KEYS; 76, string_content:aaa; 77, identifier:self; 78, identifier:_KEYS; 79, string_content:aab; 80, identifier:self; 81, identifier:_KEYS; 82, string_content:aac; 83, identifier:self; 84, identifier:_KEYS; 85, string_content:aad; 86, identifier:self; 87, identifier:_KEYS; 88, string_content:aae; 89, identifier:self; 90, identifier:_KEYS; 91, string_content:zzy; 92, identifier:self; 93, identifier:_KEYS; 94, string_content:zzz | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 7, 16; 8, 17; 8, 18; 9, 19; 9, 20; 10, 21; 10, 22; 11, 23; 11, 24; 12, 25; 12, 26; 13, 27; 13, 28; 14, 29; 14, 30; 15, 31; 17, 32; 17, 33; 18, 34; 19, 35; 19, 36; 20, 37; 21, 38; 21, 39; 22, 40; 23, 41; 23, 42; 24, 43; 25, 44; 25, 45; 26, 46; 27, 47; 27, 48; 28, 49; 29, 50; 29, 51; 30, 52; 33, 53; 33, 54; 34, 55; 36, 56; 36, 57; 37, 58; 39, 59; 39, 60; 40, 61; 42, 62; 42, 63; 43, 64; 45, 65; 45, 66; 46, 67; 48, 68; 48, 69; 49, 70; 51, 71; 51, 72; 52, 73; 53, 74; 53, 75; 55, 76; 56, 77; 56, 78; 58, 79; 59, 80; 59, 81; 61, 82; 62, 83; 62, 84; 64, 85; 65, 86; 65, 87; 67, 88; 68, 89; 68, 90; 70, 91; 71, 92; 71, 93; 73, 94 | def sort_func(self, key):
"""Used to sort keys when writing Entry to JSON format.
Should be supplemented/overridden by inheriting classes.
"""
if key == self._KEYS.SCHEMA:
return 'aaa'
if key == self._KEYS.NAME:
return 'aab'
if key == self._KEYS.SOURCES:
return 'aac'
if key == self._KEYS.ALIAS:
return 'aad'
if key == self._KEYS.MODELS:
return 'aae'
if key == self._KEYS.PHOTOMETRY:
return 'zzy'
if key == self._KEYS.SPECTRA:
return 'zzz'
return key |
0, module; 1, function_definition; 2, function_name:sort_func; 3, parameters; 4, block; 5, identifier:self; 6, identifier:key; 7, expression_statement; 8, if_statement; 9, if_statement; 10, if_statement; 11, return_statement; 12, comment:"""Specify order for attributes."""; 13, comparison_operator:key == self._KEYS.TIME; 14, block; 15, comparison_operator:key == self._KEYS.MODEL; 16, block; 17, comparison_operator:key == self._KEYS.SOURCE; 18, block; 19, identifier:key; 20, identifier:key; 21, attribute; 22, return_statement; 23, identifier:key; 24, attribute; 25, return_statement; 26, identifier:key; 27, attribute; 28, return_statement; 29, attribute; 30, identifier:TIME; 31, string; 32, attribute; 33, identifier:MODEL; 34, string; 35, attribute; 36, identifier:SOURCE; 37, string; 38, identifier:self; 39, identifier:_KEYS; 40, string_content:aaa; 41, identifier:self; 42, identifier:_KEYS; 43, string_content:zzy; 44, identifier:self; 45, identifier:_KEYS; 46, string_content:zzz | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 8, 13; 8, 14; 9, 15; 9, 16; 10, 17; 10, 18; 11, 19; 13, 20; 13, 21; 14, 22; 15, 23; 15, 24; 16, 25; 17, 26; 17, 27; 18, 28; 21, 29; 21, 30; 22, 31; 24, 32; 24, 33; 25, 34; 27, 35; 27, 36; 28, 37; 29, 38; 29, 39; 31, 40; 32, 41; 32, 42; 34, 43; 35, 44; 35, 45; 37, 46 | def sort_func(self, key):
"""Specify order for attributes."""
if key == self._KEYS.TIME:
return 'aaa'
if key == self._KEYS.MODEL:
return 'zzy'
if key == self._KEYS.SOURCE:
return 'zzz'
return key |
0, module; 1, function_definition; 2, function_name:TypeFactory; 3, parameters; 4, block; 5, identifier:type_; 6, expression_statement; 7, if_statement; 8, for_statement; 9, raise_statement; 10, comment:"""
This function creates a standard form type from a simplified form.
>>> from datetime import date, datetime
>>> from pyws.functions.args import TypeFactory
>>> from pyws.functions.args import String, Integer, Float, Date, DateTime
>>> TypeFactory(str) == String
True
>>> TypeFactory(float) == Float
True
>>> TypeFactory(date) == Date
True
>>> TypeFactory(datetime) == DateTime
True
>>> from operator import attrgetter
>>> from pyws.functions.args import Dict
>>> dct = TypeFactory({0: 'HelloWorldDict', 'hello': str, 'world': int})
>>> issubclass(dct, Dict)
True
>>> dct.__name__
'HelloWorldDict'
>>> fields = sorted(dct.fields, key=attrgetter('name'))
>>> len(dct.fields)
2
>>> fields[0].name == 'hello'
True
>>> fields[0].type == String
True
>>> fields[1].name == 'world'
True
>>> fields[1].type == Integer
True
>>> from pyws.functions.args import List
>>> lst = TypeFactory([int])
>>> issubclass(lst, List)
True
>>> lst.__name__
'IntegerList'
>>> lst.element_type == Integer
True
"""; 11, boolean_operator; 12, block; 13, identifier:x; 14, identifier:__types__; 15, block; 16, call; 17, call; 18, call; 19, return_statement; 20, if_statement; 21, identifier:UnknownType; 22, argument_list; 23, identifier:isinstance; 24, argument_list; 25, identifier:issubclass; 26, argument_list; 27, identifier:type_; 28, call; 29, block; 30, identifier:type_; 31, identifier:type_; 32, identifier:type; 33, identifier:type_; 34, identifier:Type; 35, attribute; 36, argument_list; 37, return_statement; 38, identifier:x; 39, identifier:represents; 40, identifier:type_; 41, call; 42, attribute; 43, argument_list; 44, identifier:x; 45, identifier:get; 46, identifier:type_ | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 6, 10; 7, 11; 7, 12; 8, 13; 8, 14; 8, 15; 9, 16; 11, 17; 11, 18; 12, 19; 15, 20; 16, 21; 16, 22; 17, 23; 17, 24; 18, 25; 18, 26; 19, 27; 20, 28; 20, 29; 22, 30; 24, 31; 24, 32; 26, 33; 26, 34; 28, 35; 28, 36; 29, 37; 35, 38; 35, 39; 36, 40; 37, 41; 41, 42; 41, 43; 42, 44; 42, 45; 43, 46 | def TypeFactory(type_):
"""
This function creates a standard form type from a simplified form.
>>> from datetime import date, datetime
>>> from pyws.functions.args import TypeFactory
>>> from pyws.functions.args import String, Integer, Float, Date, DateTime
>>> TypeFactory(str) == String
True
>>> TypeFactory(float) == Float
True
>>> TypeFactory(date) == Date
True
>>> TypeFactory(datetime) == DateTime
True
>>> from operator import attrgetter
>>> from pyws.functions.args import Dict
>>> dct = TypeFactory({0: 'HelloWorldDict', 'hello': str, 'world': int})
>>> issubclass(dct, Dict)
True
>>> dct.__name__
'HelloWorldDict'
>>> fields = sorted(dct.fields, key=attrgetter('name'))
>>> len(dct.fields)
2
>>> fields[0].name == 'hello'
True
>>> fields[0].type == String
True
>>> fields[1].name == 'world'
True
>>> fields[1].type == Integer
True
>>> from pyws.functions.args import List
>>> lst = TypeFactory([int])
>>> issubclass(lst, List)
True
>>> lst.__name__
'IntegerList'
>>> lst.element_type == Integer
True
"""
if isinstance(type_, type) and issubclass(type_, Type):
return type_
for x in __types__:
if x.represents(type_):
return x.get(type_)
raise UnknownType(type_) |
0, module; 1, function_definition; 2, function_name:get_sorted_dependencies; 3, parameters; 4, block; 5, identifier:service_model; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, comment:"""
Returns list of application models in topological order.
It is used in order to correctly delete dependent resources.
"""; 13, assignment; 14, assignment; 15, assignment; 16, identifier:rel; 17, identifier:relations; 18, block; 19, call; 20, identifier:app_models; 21, call; 22, identifier:dependencies; 23, dictionary_comprehension; 24, identifier:relations; 25, generator_expression; 26, expression_statement; 27, identifier:stable_topological_sort; 28, argument_list; 29, identifier:list; 30, argument_list; 31, pair; 32, for_in_clause; 33, identifier:relation; 34, for_in_clause; 35, for_in_clause; 36, if_clause; 37, call; 38, identifier:app_models; 39, identifier:dependencies; 40, call; 41, identifier:model; 42, call; 43, identifier:model; 44, identifier:app_models; 45, identifier:model; 46, identifier:app_models; 47, identifier:relation; 48, attribute; 49, comparison_operator:relation.on_delete in (models.PROTECT, models.CASCADE); 50, attribute; 51, argument_list; 52, attribute; 53, argument_list; 54, identifier:set; 55, argument_list; 56, attribute; 57, identifier:related_objects; 58, attribute; 59, tuple; 60, subscript; 61, identifier:add; 62, attribute; 63, attribute; 64, identifier:get_models; 65, identifier:model; 66, identifier:_meta; 67, identifier:relation; 68, identifier:on_delete; 69, attribute; 70, attribute; 71, identifier:dependencies; 72, attribute; 73, identifier:rel; 74, identifier:related_model; 75, attribute; 76, identifier:app_config; 77, identifier:models; 78, identifier:PROTECT; 79, identifier:models; 80, identifier:CASCADE; 81, identifier:rel; 82, identifier:model; 83, identifier:service_model; 84, identifier:_meta | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 7, 13; 8, 14; 9, 15; 10, 16; 10, 17; 10, 18; 11, 19; 13, 20; 13, 21; 14, 22; 14, 23; 15, 24; 15, 25; 18, 26; 19, 27; 19, 28; 21, 29; 21, 30; 23, 31; 23, 32; 25, 33; 25, 34; 25, 35; 25, 36; 26, 37; 28, 38; 28, 39; 30, 40; 31, 41; 31, 42; 32, 43; 32, 44; 34, 45; 34, 46; 35, 47; 35, 48; 36, 49; 37, 50; 37, 51; 40, 52; 40, 53; 42, 54; 42, 55; 48, 56; 48, 57; 49, 58; 49, 59; 50, 60; 50, 61; 51, 62; 52, 63; 52, 64; 56, 65; 56, 66; 58, 67; 58, 68; 59, 69; 59, 70; 60, 71; 60, 72; 62, 73; 62, 74; 63, 75; 63, 76; 69, 77; 69, 78; 70, 79; 70, 80; 72, 81; 72, 82; 75, 83; 75, 84 | def get_sorted_dependencies(service_model):
"""
Returns list of application models in topological order.
It is used in order to correctly delete dependent resources.
"""
app_models = list(service_model._meta.app_config.get_models())
dependencies = {model: set() for model in app_models}
relations = (
relation
for model in app_models
for relation in model._meta.related_objects
if relation.on_delete in (models.PROTECT, models.CASCADE)
)
for rel in relations:
dependencies[rel.model].add(rel.related_model)
return stable_topological_sort(app_models, dependencies) |
0, module; 1, function_definition; 2, function_name:format_time_and_value_to_segment_list; 3, parameters; 4, block; 5, identifier:time_and_value_list; 6, identifier:segments_count; 7, identifier:start_timestamp; 8, identifier:end_timestamp; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, for_statement; 14, return_statement; 15, identifier:average; 16, False; 17, comment:"""
Format time_and_value_list to time segments
Parameters
^^^^^^^^^^
time_and_value_list: list of tuples
Have to be sorted by time
Example: [(time, value), (time, value) ...]
segments_count: integer
How many segments will be in result
Returns
^^^^^^^
List of dictionaries
Example:
[{'from': time1, 'to': time2, 'value': sum_of_values_from_time1_to_time2}, ...]
"""; 18, assignment; 19, assignment; 20, identifier:i; 21, call; 22, block; 23, identifier:segment_list; 24, identifier:segment_list; 25, list; 26, identifier:time_step; 27, binary_operator:(end_timestamp - start_timestamp) / segments_count; 28, identifier:range; 29, argument_list; 30, expression_statement; 31, expression_statement; 32, expression_statement; 33, expression_statement; 34, if_statement; 35, expression_statement; 36, parenthesized_expression; 37, identifier:segments_count; 38, identifier:segments_count; 39, assignment; 40, assignment; 41, assignment; 42, assignment; 43, boolean_operator; 44, block; 45, call; 46, binary_operator:end_timestamp - start_timestamp; 47, identifier:segment_start_timestamp; 48, binary_operator:start_timestamp + time_step * i; 49, identifier:segment_end_timestamp; 50, binary_operator:segment_start_timestamp + time_step; 51, identifier:value_list; 52, list_comprehension; 53, identifier:segment_value; 54, call; 55, identifier:average; 56, comparison_operator:len(value_list) != 0; 57, expression_statement; 58, attribute; 59, argument_list; 60, identifier:end_timestamp; 61, identifier:start_timestamp; 62, identifier:start_timestamp; 63, binary_operator:time_step * i; 64, identifier:segment_start_timestamp; 65, identifier:time_step; 66, identifier:value; 67, for_in_clause; 68, if_clause; 69, identifier:sum; 70, argument_list; 71, call; 72, integer:0; 73, augmented_assignment; 74, identifier:segment_list; 75, identifier:append; 76, dictionary; 77, identifier:time_step; 78, identifier:i; 79, pattern_list; 80, identifier:time_and_value_list; 81, boolean_operator; 82, identifier:value_list; 83, identifier:len; 84, argument_list; 85, identifier:segment_value; 86, call; 87, pair; 88, pair; 89, pair; 90, identifier:time; 91, identifier:value; 92, comparison_operator:time >= segment_start_timestamp; 93, comparison_operator:time < segment_end_timestamp; 94, identifier:value_list; 95, identifier:len; 96, argument_list; 97, string; 98, identifier:segment_start_timestamp; 99, string; 100, identifier:segment_end_timestamp; 101, string; 102, identifier:segment_value; 103, identifier:time; 104, identifier:segment_start_timestamp; 105, identifier:time; 106, identifier:segment_end_timestamp; 107, identifier:value_list; 108, string_content:from; 109, string_content:to; 110, string_content:value | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 9, 15; 9, 16; 10, 17; 11, 18; 12, 19; 13, 20; 13, 21; 13, 22; 14, 23; 18, 24; 18, 25; 19, 26; 19, 27; 21, 28; 21, 29; 22, 30; 22, 31; 22, 32; 22, 33; 22, 34; 22, 35; 27, 36; 27, 37; 29, 38; 30, 39; 31, 40; 32, 41; 33, 42; 34, 43; 34, 44; 35, 45; 36, 46; 39, 47; 39, 48; 40, 49; 40, 50; 41, 51; 41, 52; 42, 53; 42, 54; 43, 55; 43, 56; 44, 57; 45, 58; 45, 59; 46, 60; 46, 61; 48, 62; 48, 63; 50, 64; 50, 65; 52, 66; 52, 67; 52, 68; 54, 69; 54, 70; 56, 71; 56, 72; 57, 73; 58, 74; 58, 75; 59, 76; 63, 77; 63, 78; 67, 79; 67, 80; 68, 81; 70, 82; 71, 83; 71, 84; 73, 85; 73, 86; 76, 87; 76, 88; 76, 89; 79, 90; 79, 91; 81, 92; 81, 93; 84, 94; 86, 95; 86, 96; 87, 97; 87, 98; 88, 99; 88, 100; 89, 101; 89, 102; 92, 103; 92, 104; 93, 105; 93, 106; 96, 107; 97, 108; 99, 109; 101, 110 | def format_time_and_value_to_segment_list(time_and_value_list, segments_count, start_timestamp,
end_timestamp, average=False):
"""
Format time_and_value_list to time segments
Parameters
^^^^^^^^^^
time_and_value_list: list of tuples
Have to be sorted by time
Example: [(time, value), (time, value) ...]
segments_count: integer
How many segments will be in result
Returns
^^^^^^^
List of dictionaries
Example:
[{'from': time1, 'to': time2, 'value': sum_of_values_from_time1_to_time2}, ...]
"""
segment_list = []
time_step = (end_timestamp - start_timestamp) / segments_count
for i in range(segments_count):
segment_start_timestamp = start_timestamp + time_step * i
segment_end_timestamp = segment_start_timestamp + time_step
value_list = [
value for time, value in time_and_value_list
if time >= segment_start_timestamp and time < segment_end_timestamp]
segment_value = sum(value_list)
if average and len(value_list) != 0:
segment_value /= len(value_list)
segment_list.append({
'from': segment_start_timestamp,
'to': segment_end_timestamp,
'value': segment_value,
})
return segment_list |
0, module; 1, function_definition; 2, function_name:get_api_root_view; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, class_definition; 12, return_statement; 13, identifier:api_urls; 14, None; 15, comment:"""
Return a basic root view.
"""; 16, assignment; 17, assignment; 18, pattern_list; 19, attribute; 20, block; 21, identifier:APIRootView; 22, argument_list; 23, block; 24, call; 25, identifier:api_root_dict; 26, call; 27, identifier:list_name; 28, attribute; 29, identifier:prefix; 30, identifier:viewset; 31, identifier:basename; 32, identifier:self; 33, identifier:registry; 34, expression_statement; 35, attribute; 36, expression_statement; 37, expression_statement; 38, function_definition; 39, attribute; 40, argument_list; 41, identifier:OrderedDict; 42, argument_list; 43, subscript; 44, identifier:name; 45, assignment; 46, identifier:views; 47, identifier:APIView; 48, assignment; 49, assignment; 50, function_name:get; 51, parameters; 52, comment:# Return a plain {"name": "hyperlink"} response.; 53, block; 54, identifier:APIRootView; 55, identifier:as_view; 56, attribute; 57, integer:0; 58, subscript; 59, call; 60, identifier:_ignore_model_permissions; 61, True; 62, identifier:exclude_from_schema; 63, True; 64, identifier:self; 65, identifier:request; 66, list_splat_pattern; 67, dictionary_splat_pattern; 68, expression_statement; 69, expression_statement; 70, for_statement; 71, return_statement; 72, identifier:self; 73, identifier:routes; 74, identifier:api_root_dict; 75, identifier:prefix; 76, attribute; 77, argument_list; 78, identifier:args; 79, identifier:kwargs; 80, assignment; 81, assignment; 82, pattern_list; 83, call; 84, block; 85, call; 86, identifier:list_name; 87, identifier:format; 88, keyword_argument; 89, identifier:ret; 90, call; 91, identifier:namespace; 92, attribute; 93, identifier:key; 94, identifier:url_name; 95, identifier:sorted; 96, argument_list; 97, if_statement; 98, try_statement; 99, identifier:Response; 100, argument_list; 101, identifier:basename; 102, identifier:basename; 103, identifier:OrderedDict; 104, argument_list; 105, attribute; 106, identifier:namespace; 107, call; 108, keyword_argument; 109, identifier:namespace; 110, block; 111, block; 112, except_clause; 113, identifier:ret; 114, identifier:request; 115, identifier:resolver_match; 116, attribute; 117, argument_list; 118, identifier:key; 119, call; 120, expression_statement; 121, expression_statement; 122, identifier:NoReverseMatch; 123, comment:# Don't bail out if eg. 
no list routes exist, only detail routes.; 124, block; 125, identifier:api_root_dict; 126, identifier:items; 127, identifier:itemgetter; 128, argument_list; 129, assignment; 130, assignment; 131, continue_statement; 132, integer:0; 133, identifier:url_name; 134, binary_operator:namespace + ':' + url_name; 135, subscript; 136, call; 137, binary_operator:namespace + ':'; 138, identifier:url_name; 139, identifier:ret; 140, identifier:key; 141, identifier:reverse; 142, argument_list; 143, identifier:namespace; 144, string; 145, identifier:url_name; 146, keyword_argument; 147, keyword_argument; 148, keyword_argument; 149, keyword_argument; 150, string_content::; 151, identifier:args; 152, identifier:args; 153, identifier:kwargs; 154, identifier:kwargs; 155, identifier:request; 156, identifier:request; 157, identifier:format; 158, call; 159, attribute; 160, argument_list; 161, identifier:kwargs; 162, identifier:get; 163, string; 164, None; 165, string_content:format | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 6, 13; 6, 14; 7, 15; 8, 16; 9, 17; 10, 18; 10, 19; 10, 20; 11, 21; 11, 22; 11, 23; 12, 24; 16, 25; 16, 26; 17, 27; 17, 28; 18, 29; 18, 30; 18, 31; 19, 32; 19, 33; 20, 34; 22, 35; 23, 36; 23, 37; 23, 38; 24, 39; 24, 40; 26, 41; 26, 42; 28, 43; 28, 44; 34, 45; 35, 46; 35, 47; 36, 48; 37, 49; 38, 50; 38, 51; 38, 52; 38, 53; 39, 54; 39, 55; 43, 56; 43, 57; 45, 58; 45, 59; 48, 60; 48, 61; 49, 62; 49, 63; 51, 64; 51, 65; 51, 66; 51, 67; 53, 68; 53, 69; 53, 70; 53, 71; 56, 72; 56, 73; 58, 74; 58, 75; 59, 76; 59, 77; 66, 78; 67, 79; 68, 80; 69, 81; 70, 82; 70, 83; 70, 84; 71, 85; 76, 86; 76, 87; 77, 88; 80, 89; 80, 90; 81, 91; 81, 92; 82, 93; 82, 94; 83, 95; 83, 96; 84, 97; 84, 98; 85, 99; 85, 100; 88, 101; 88, 102; 90, 103; 90, 104; 92, 105; 92, 106; 96, 107; 96, 108; 97, 109; 97, 110; 98, 111; 98, 112; 100, 113; 105, 114; 105, 115; 107, 116; 107, 117; 108, 118; 108, 119; 110, 120; 111, 121; 112, 122; 112, 123; 112, 124; 116, 125; 116, 126; 119, 127; 119, 128; 120, 129; 121, 130; 124, 131; 128, 132; 129, 133; 129, 134; 130, 135; 130, 136; 134, 137; 134, 138; 135, 139; 135, 140; 136, 141; 136, 142; 137, 143; 137, 144; 142, 145; 142, 146; 142, 147; 142, 148; 142, 149; 144, 150; 146, 151; 146, 152; 147, 153; 147, 154; 148, 155; 148, 156; 149, 157; 149, 158; 158, 159; 158, 160; 159, 161; 159, 162; 160, 163; 160, 164; 163, 165 | def get_api_root_view(self, api_urls=None):
"""
Return a basic root view.
"""
api_root_dict = OrderedDict()
list_name = self.routes[0].name
for prefix, viewset, basename in self.registry:
api_root_dict[prefix] = list_name.format(basename=basename)
class APIRootView(views.APIView):
_ignore_model_permissions = True
exclude_from_schema = True
def get(self, request, *args, **kwargs):
# Return a plain {"name": "hyperlink"} response.
ret = OrderedDict()
namespace = request.resolver_match.namespace
for key, url_name in sorted(api_root_dict.items(), key=itemgetter(0)):
if namespace:
url_name = namespace + ':' + url_name
try:
ret[key] = reverse(
url_name,
args=args,
kwargs=kwargs,
request=request,
format=kwargs.get('format', None)
)
except NoReverseMatch:
# Don't bail out if eg. no list routes exist, only detail routes.
continue
return Response(ret)
return APIRootView.as_view() |
0, module; 1, function_definition; 2, function_name:get_default_base_name; 3, parameters; 4, block; 5, identifier:self; 6, identifier:viewset; 7, expression_statement; 8, expression_statement; 9, if_statement; 10, return_statement; 11, comment:"""
Attempt to automatically determine base name using `get_url_name`.
"""; 12, assignment; 13, comparison_operator:queryset is not None; 14, block; 15, call; 16, identifier:queryset; 17, call; 18, identifier:queryset; 19, None; 20, expression_statement; 21, if_statement; 22, attribute; 23, argument_list; 24, identifier:getattr; 25, argument_list; 26, assignment; 27, comparison_operator:get_url_name is not None; 28, block; 29, call; 30, identifier:get_default_base_name; 31, identifier:viewset; 32, identifier:viewset; 33, string; 34, None; 35, identifier:get_url_name; 36, call; 37, identifier:get_url_name; 38, None; 39, return_statement; 40, identifier:super; 41, argument_list; 42, string_content:queryset; 43, identifier:getattr; 44, argument_list; 45, call; 46, identifier:SortedDefaultRouter; 47, identifier:self; 48, attribute; 49, string; 50, None; 51, identifier:get_url_name; 52, argument_list; 53, identifier:queryset; 54, identifier:model; 55, string_content:get_url_name | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 9, 14; 10, 15; 12, 16; 12, 17; 13, 18; 13, 19; 14, 20; 14, 21; 15, 22; 15, 23; 17, 24; 17, 25; 20, 26; 21, 27; 21, 28; 22, 29; 22, 30; 23, 31; 25, 32; 25, 33; 25, 34; 26, 35; 26, 36; 27, 37; 27, 38; 28, 39; 29, 40; 29, 41; 33, 42; 36, 43; 36, 44; 39, 45; 41, 46; 41, 47; 44, 48; 44, 49; 44, 50; 45, 51; 45, 52; 48, 53; 48, 54; 49, 55 | def get_default_base_name(self, viewset):
"""
Attempt to automatically determine base name using `get_url_name`.
"""
queryset = getattr(viewset, 'queryset', None)
if queryset is not None:
get_url_name = getattr(queryset.model, 'get_url_name', None)
if get_url_name is not None:
return get_url_name()
return super(SortedDefaultRouter, self).get_default_base_name(viewset) |
0, module; 1, function_definition; 2, function_name:sort_dict; 3, parameters; 4, block; 5, identifier:d; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, return_statement; 10, identifier:desc; 11, True; 12, comment:"""
Sort an ordered dictionary by value, descending.
Args:
d (OrderedDict): An ordered dictionary.
desc (bool): If true, sort desc.
Returns:
OrderedDict: The sorted dictionary.
"""; 13, assignment; 14, call; 15, identifier:sort; 16, call; 17, identifier:OrderedDict; 18, argument_list; 19, identifier:sorted; 20, argument_list; 21, identifier:sort; 22, call; 23, keyword_argument; 24, keyword_argument; 25, attribute; 26, argument_list; 27, identifier:key; 28, lambda; 29, identifier:reverse; 30, identifier:desc; 31, identifier:d; 32, identifier:items; 33, lambda_parameters; 34, subscript; 35, identifier:x; 36, identifier:x; 37, integer:1 | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 6, 10; 6, 11; 7, 12; 8, 13; 9, 14; 13, 15; 13, 16; 14, 17; 14, 18; 16, 19; 16, 20; 18, 21; 20, 22; 20, 23; 20, 24; 22, 25; 22, 26; 23, 27; 23, 28; 24, 29; 24, 30; 25, 31; 25, 32; 28, 33; 28, 34; 33, 35; 34, 36; 34, 37 | def sort_dict(d, desc=True):
"""
Sort an ordered dictionary by value, descending.
Args:
d (OrderedDict): An ordered dictionary.
desc (bool): If true, sort desc.
Returns:
OrderedDict: The sorted dictionary.
"""
sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)
return OrderedDict(sort) |
0, module; 1, function_definition; 2, function_name:_tupleCompare; 3, parameters; 4, block; 5, identifier:tuple1; 6, identifier:ineq; 7, identifier:tuple2; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, expression_statement; 13, for_statement; 14, return_statement; 15, identifier:eq; 16, lambda; 17, identifier:ander; 18, identifier:AND; 19, identifier:orer; 20, identifier:OR; 21, comment:"""
Compare two 'in-database tuples'. Useful when sorting by a compound key
and slicing into the middle of that query.
"""; 22, assignment; 23, identifier:limit; 24, call; 25, block; 26, call; 27, lambda_parameters; 28, parenthesized_expression; 29, identifier:orholder; 30, list; 31, identifier:range; 32, argument_list; 33, expression_statement; 34, expression_statement; 35, expression_statement; 36, identifier:orer; 37, argument_list; 38, identifier:a; 39, identifier:b; 40, comparison_operator:a==b; 41, call; 42, assignment; 43, assignment; 44, call; 45, list_splat; 46, identifier:a; 47, identifier:b; 48, identifier:len; 49, argument_list; 50, identifier:eqconstraint; 51, list_comprehension; 52, identifier:ineqconstraint; 53, call; 54, attribute; 55, argument_list; 56, identifier:orholder; 57, identifier:tuple1; 58, call; 59, for_in_clause; 60, identifier:ineq; 61, argument_list; 62, identifier:orholder; 63, identifier:append; 64, call; 65, identifier:eq; 66, argument_list; 67, pattern_list; 68, subscript; 69, subscript; 70, subscript; 71, identifier:ander; 72, argument_list; 73, identifier:elem1; 74, identifier:elem2; 75, identifier:elem1; 76, identifier:elem2; 77, call; 78, slice; 79, identifier:tuple1; 80, identifier:limit; 81, identifier:tuple2; 82, identifier:limit; 83, list_splat; 84, identifier:zip; 85, argument_list; 86, identifier:limit; 87, parenthesized_expression; 88, identifier:tuple1; 89, identifier:tuple2; 90, binary_operator:eqconstraint + [ineqconstraint]; 91, identifier:eqconstraint; 92, list; 93, identifier:ineqconstraint | 0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 8, 15; 8, 16; 9, 17; 9, 18; 10, 19; 10, 20; 11, 21; 12, 22; 13, 23; 13, 24; 13, 25; 14, 26; 16, 27; 16, 28; 22, 29; 22, 30; 24, 31; 24, 32; 25, 33; 25, 34; 25, 35; 26, 36; 26, 37; 27, 38; 27, 39; 28, 40; 32, 41; 33, 42; 34, 43; 35, 44; 37, 45; 40, 46; 40, 47; 41, 48; 41, 49; 42, 50; 42, 51; 43, 52; 43, 53; 44, 54; 44, 55; 45, 56; 49, 57; 51, 58; 51, 59; 53, 60; 53, 61; 54, 62; 54, 63; 55, 64; 58, 65; 58, 66; 59, 67; 59, 68; 61, 69; 61, 70; 64, 71; 64, 72; 66, 73; 66, 74; 67, 75; 67, 76; 68, 77; 68, 78; 69, 79; 69, 80; 70, 81; 70, 82; 72, 83; 77, 84; 77, 85; 78, 86; 83, 87; 85, 88; 85, 89; 87, 90; 90, 91; 90, 92; 92, 93 | def _tupleCompare(tuple1, ineq, tuple2,
eq=lambda a,b: (a==b),
ander=AND,
orer=OR):
"""
Compare two 'in-database tuples'. Useful when sorting by a compound key
and slicing into the middle of that query.
"""
orholder = []
for limit in range(len(tuple1)):
eqconstraint = [
eq(elem1, elem2) for elem1, elem2 in zip(tuple1, tuple2)[:limit]]
ineqconstraint = ineq(tuple1[limit], tuple2[limit])
orholder.append(ander(*(eqconstraint + [ineqconstraint])))
return orer(*orholder) |
0, module; 1, function_definition; 2, function_name:get_datasets; 3, parameters; 4, comment:# type: (str, Any) -> List[hdx.data.dataset.Dataset]; 5, block; 6, identifier:self; 7, default_parameter; 8, dictionary_splat_pattern; 9, expression_statement; 10, return_statement; 11, identifier:query; 12, string; 13, identifier:kwargs; 14, comment:"""Get list of datasets in organization
Args:
query (str): Restrict datasets returned to this query (in Solr format). Defaults to '*:*'.
**kwargs: See below
sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.
rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).
start (int): Offset in the complete result for where the set of returned datasets should begin
facet (string): Whether to enable faceted results. Defaults to True.
facet.mincount (int): Minimum counts for facet fields should be included in the results
facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.
facet.field (List[str]): Fields to facet upon. Default is empty.
use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False.
Returns:
List[Dataset]: List of datasets in organization
"""; 15, call; 16, string_content:*:*; 17, attribute; 18, argument_list; 19, attribute; 20, identifier:search_in_hdx; 21, keyword_argument; 22, keyword_argument; 23, keyword_argument; 24, dictionary_splat; 25, attribute; 26, identifier:Dataset; 27, identifier:query; 28, identifier:query; 29, identifier:configuration; 30, attribute; 31, identifier:fq; 32, binary_operator:'organization:%s' % self.data['name']; 33, identifier:kwargs; 34, attribute; 35, identifier:dataset; 36, identifier:self; 37, identifier:configuration; 38, string; 39, subscript; 40, identifier:hdx; 41, identifier:data; 42, string_content:organization:%s; 43, attribute; 44, string; 45, identifier:self; 46, identifier:data; 47, string_content:name | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 3, 8; 5, 9; 5, 10; 7, 11; 7, 12; 8, 13; 9, 14; 10, 15; 12, 16; 15, 17; 15, 18; 17, 19; 17, 20; 18, 21; 18, 22; 18, 23; 18, 24; 19, 25; 19, 26; 21, 27; 21, 28; 22, 29; 22, 30; 23, 31; 23, 32; 24, 33; 25, 34; 25, 35; 30, 36; 30, 37; 32, 38; 32, 39; 34, 40; 34, 41; 38, 42; 39, 43; 39, 44; 43, 45; 43, 46; 44, 47 | def get_datasets(self, query='*:*', **kwargs):
# type: (str, Any) -> List[hdx.data.dataset.Dataset]
"""Get list of datasets in organization
Args:
query (str): Restrict datasets returned to this query (in Solr format). Defaults to '*:*'.
**kwargs: See below
sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.
rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).
start (int): Offset in the complete result for where the set of returned datasets should begin
facet (string): Whether to enable faceted results. Defaults to True.
facet.mincount (int): Minimum counts for facet fields should be included in the results
facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.
facet.field (List[str]): Fields to facet upon. Default is empty.
use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False.
Returns:
List[Dataset]: List of datasets in organization
"""
return hdx.data.dataset.Dataset.search_in_hdx(query=query,
configuration=self.configuration,
fq='organization:%s' % self.data['name'], **kwargs) |
0, module; 1, function_definition; 2, function_name:get_all_organization_names; 3, parameters; 4, comment:# type: (Optional[Configuration], Any) -> List[str]; 5, block; 6, default_parameter; 7, dictionary_splat_pattern; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, comment:# only for error message if produced; 12, return_statement; 13, identifier:configuration; 14, None; 15, identifier:kwargs; 16, comment:"""Get all organization names in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
sort (str): Sort the search results according to field name and sort-order. Allowed fields are ‘name’, ‘package_count’ and ‘title’. Defaults to 'name asc'.
organizations (List[str]): List of names of the groups to return.
all_fields (bool): Return group dictionaries instead of just names. Only core fields are returned - get some more using the include_* options. Defaults to False.
include_extras (bool): If all_fields, include the group extra fields. Defaults to False.
include_tags (bool): If all_fields, include the group tags. Defaults to False.
include_groups: If all_fields, include the groups the groups are in. Defaults to False.
Returns:
List[str]: List of all organization names in HDX
"""; 17, assignment; 18, assignment; 19, call; 20, identifier:organization; 21, call; 22, subscript; 23, string; 24, attribute; 25, argument_list; 26, identifier:Organization; 27, argument_list; 28, identifier:organization; 29, string; 30, string_content:all organizations; 31, identifier:organization; 32, identifier:_write_to_hdx; 33, string; 34, identifier:kwargs; 35, string; 36, keyword_argument; 37, string_content:id; 38, string_content:list; 39, string_content:id; 40, identifier:configuration; 41, identifier:configuration | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 5, 8; 5, 9; 5, 10; 5, 11; 5, 12; 6, 13; 6, 14; 7, 15; 8, 16; 9, 17; 10, 18; 12, 19; 17, 20; 17, 21; 18, 22; 18, 23; 19, 24; 19, 25; 21, 26; 21, 27; 22, 28; 22, 29; 23, 30; 24, 31; 24, 32; 25, 33; 25, 34; 25, 35; 27, 36; 29, 37; 33, 38; 35, 39; 36, 40; 36, 41 | def get_all_organization_names(configuration=None, **kwargs):
# type: (Optional[Configuration], Any) -> List[str]
"""Get all organization names in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
sort (str): Sort the search results according to field name and sort-order. Allowed fields are ‘name’, ‘package_count’ and ‘title’. Defaults to 'name asc'.
organizations (List[str]): List of names of the groups to return.
all_fields (bool): Return group dictionaries instead of just names. Only core fields are returned - get some more using the include_* options. Defaults to False.
include_extras (bool): If all_fields, include the group extra fields. Defaults to False.
include_tags (bool): If all_fields, include the group tags. Defaults to False.
include_groups: If all_fields, include the groups the groups are in. Defaults to False.
Returns:
List[str]: List of all organization names in HDX
"""
organization = Organization(configuration=configuration)
organization['id'] = 'all organizations' # only for error message if produced
return organization._write_to_hdx('list', kwargs, 'id') |
0, module; 1, function_definition; 2, function_name:search_in_hdx; 3, parameters; 4, comment:# type: (Optional[str], Optional[Configuration], int, Any) -> List['Dataset']; 5, block; 6, identifier:cls; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, dictionary_splat_pattern; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, while_statement; 18, if_statement; 19, return_statement; 20, identifier:query; 21, string; 22, identifier:configuration; 23, None; 24, identifier:page_size; 25, integer:1000; 26, identifier:kwargs; 27, comment:"""Searches for datasets in HDX
Args:
query (Optional[str]): Query (in Solr format). Defaults to '*:*'.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
page_size (int): Size of page to return. Defaults to 1000.
**kwargs: See below
fq (string): Any filter queries to apply
sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.
rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).
start (int): Offset in the complete result for where the set of returned datasets should begin
facet (string): Whether to enable faceted results. Defaults to True.
facet.mincount (int): Minimum counts for facet fields should be included in the results
facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.
facet.field (List[str]): Fields to facet upon. Default is empty.
use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False.
Returns:
List[Dataset]: list of datasets resulting from query
"""; 28, assignment; 29, assignment; 30, assignment; 31, assignment; 32, assignment; 33, boolean_operator; 34, comment:# if the count values vary for multiple calls, then must redo query; 35, block; 36, boolean_operator; 37, block; 38, identifier:all_datasets; 39, string_content:*:*; 40, identifier:dataset; 41, call; 42, identifier:total_rows; 43, call; 44, identifier:start; 45, call; 46, identifier:all_datasets; 47, None; 48, identifier:attempts; 49, integer:0; 50, comparison_operator:attempts < cls.max_attempts; 51, comparison_operator:all_datasets is None; 52, expression_statement; 53, expression_statement; 54, for_statement; 55, if_statement; 56, comparison_operator:attempts == cls.max_attempts; 57, comparison_operator:all_datasets is None; 58, raise_statement; 59, identifier:Dataset; 60, argument_list; 61, attribute; 62, argument_list; 63, attribute; 64, argument_list; 65, identifier:attempts; 66, attribute; 67, identifier:all_datasets; 68, None; 69, assignment; 70, assignment; 71, identifier:page; 72, call; 73, block; 74, boolean_operator; 75, comment:# Make sure counts are all same for multiple calls to HDX; 76, block; 77, else_clause; 78, identifier:attempts; 79, attribute; 80, identifier:all_datasets; 81, None; 82, call; 83, keyword_argument; 84, identifier:kwargs; 85, identifier:get; 86, string; 87, attribute; 88, identifier:kwargs; 89, identifier:get; 90, string; 91, integer:0; 92, identifier:cls; 93, identifier:max_attempts; 94, identifier:all_datasets; 95, call; 96, identifier:counts; 97, call; 98, identifier:range; 99, argument_list; 100, expression_statement; 101, expression_statement; 102, expression_statement; 103, expression_statement; 104, expression_statement; 105, expression_statement; 106, expression_statement; 107, if_statement; 108, identifier:all_datasets; 109, comparison_operator:len(counts) != 1; 110, expression_statement; 111, expression_statement; 112, block; 113, identifier:cls; 114, identifier:max_attempts; 115, identifier:HDXError; 116, argument_list; 117, identifier:configuration; 118, identifier:configuration; 119, string_content:rows; 120, identifier:cls; 121, identifier:max_int; 122, string_content:start; 123, identifier:list; 124, argument_list; 125, identifier:set; 126, argument_list; 127, binary_operator:total_rows // page_size + 1; 128, assignment; 129, assignment; 130, assignment; 131, assignment; 132, assignment; 133, assignment; 134, assignment; 135, identifier:result; 136, block; 137, else_clause; 138, call; 139, integer:1; 140, assignment; 141, augmented_assignment; 142, expression_statement; 143, comment:# check for duplicates (shouldn't happen); 144, if_statement; 145, string; 146, binary_operator:total_rows // page_size; 147, integer:1; 148, identifier:pagetimespagesize; 149, binary_operator:page * page_size; 150, subscript; 151, binary_operator:start + pagetimespagesize; 152, identifier:rows_left; 153, binary_operator:total_rows - pagetimespagesize; 154, identifier:rows; 155, call; 156, subscript; 157, identifier:rows; 158, pattern_list; 159, call; 160, identifier:datasets; 161, call; 162, expression_statement; 163, if_statement; 164, block; 165, identifier:len; 166, argument_list; 167, identifier:all_datasets; 168, None; 169, identifier:attempts; 170, integer:1; 171, assignment; 172, comparison_operator:len(ids) != len(set(ids)); 173, block; 174, string_content:Maximum attempts reached for searching for datasets!; 175, identifier:total_rows; 176, identifier:page_size; 177, identifier:page; 178, identifier:page_size; 179, identifier:kwargs; 
180, string; 181, identifier:start; 182, identifier:pagetimespagesize; 183, identifier:total_rows; 184, identifier:pagetimespagesize; 185, identifier:min; 186, argument_list; 187, identifier:kwargs; 188, string; 189, identifier:_; 190, identifier:result; 191, attribute; 192, argument_list; 193, identifier:list; 194, argument_list; 195, assignment; 196, identifier:count; 197, block; 198, else_clause; 199, expression_statement; 200, identifier:counts; 201, identifier:ids; 202, list_comprehension; 203, call; 204, call; 205, expression_statement; 206, expression_statement; 207, string_content:start; 208, identifier:rows_left; 209, identifier:page_size; 210, string_content:rows; 211, identifier:dataset; 212, identifier:_read_from_hdx; 213, string; 214, identifier:query; 215, string; 216, subscript; 217, dictionary_splat; 218, identifier:count; 219, call; 220, expression_statement; 221, expression_statement; 222, for_statement; 223, expression_statement; 224, if_statement; 225, block; 226, call; 227, subscript; 228, for_in_clause; 229, identifier:len; 230, argument_list; 231, identifier:len; 232, argument_list; 233, assignment; 234, augmented_assignment; 235, string_content:dataset; 236, string_content:q; 237, call; 238, string; 239, identifier:kwargs; 240, attribute; 241, argument_list; 242, call; 243, assignment; 244, identifier:datasetdict; 245, subscript; 246, block; 247, augmented_assignment; 248, comparison_operator:no_results < rows; 249, block; 250, break_statement; 251, attribute; 252, argument_list; 253, identifier:dataset; 254, string; 255, identifier:dataset; 256, identifier:all_datasets; 257, identifier:ids; 258, call; 259, identifier:all_datasets; 260, None; 261, identifier:attempts; 262, integer:1; 263, attribute; 264, argument_list; 265, string_content:search; 266, identifier:result; 267, identifier:get; 268, string; 269, None; 270, attribute; 271, argument_list; 272, identifier:no_results; 273, call; 274, identifier:result; 275, string; 276, expression_statement; 277, expression_statement; 278, expression_statement; 279, expression_statement; 280, expression_statement; 281, identifier:all_datasets; 282, identifier:datasets; 283, identifier:no_results; 284, identifier:rows; 285, break_statement; 286, identifier:logger; 287, identifier:debug; 288, identifier:result; 289, string_content:id; 290, identifier:set; 291, argument_list; 292, identifier:Dataset; 293, identifier:actions; 294, string_content:count; 295, identifier:counts; 296, identifier:add; 297, identifier:count; 298, identifier:len; 299, argument_list; 300, string_content:results; 301, assignment; 302, assignment; 303, assignment; 304, call; 305, call; 306, identifier:ids; 307, subscript; 308, identifier:dataset; 309, call; 310, attribute; 311, call; 312, attribute; 313, identifier:datasetdict; 314, attribute; 315, argument_list; 316, attribute; 317, argument_list; 318, identifier:result; 319, string; 320, identifier:Dataset; 321, argument_list; 322, identifier:dataset; 323, identifier:old_data; 324, identifier:dict; 325, argument_list; 326, identifier:dataset; 327, identifier:data; 328, identifier:dataset; 329, identifier:_dataset_create_resources; 330, identifier:datasets; 331, identifier:append; 332, identifier:dataset; 333, string_content:results; 334, keyword_argument; 335, identifier:configuration; 336, identifier:configuration | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 5, 11; 5, 12; 5, 13; 5, 14; 5, 15; 5, 16; 5, 17; 5, 18; 5, 19; 7, 20; 7, 21; 8, 22; 8, 23; 9, 24; 9, 25; 10, 26; 11, 27; 12, 
28; 13, 29; 14, 30; 15, 31; 16, 32; 17, 33; 17, 34; 17, 35; 18, 36; 18, 37; 19, 38; 21, 39; 28, 40; 28, 41; 29, 42; 29, 43; 30, 44; 30, 45; 31, 46; 31, 47; 32, 48; 32, 49; 33, 50; 33, 51; 35, 52; 35, 53; 35, 54; 35, 55; 36, 56; 36, 57; 37, 58; 41, 59; 41, 60; 43, 61; 43, 62; 45, 63; 45, 64; 50, 65; 50, 66; 51, 67; 51, 68; 52, 69; 53, 70; 54, 71; 54, 72; 54, 73; 55, 74; 55, 75; 55, 76; 55, 77; 56, 78; 56, 79; 57, 80; 57, 81; 58, 82; 60, 83; 61, 84; 61, 85; 62, 86; 62, 87; 63, 88; 63, 89; 64, 90; 64, 91; 66, 92; 66, 93; 69, 94; 69, 95; 70, 96; 70, 97; 72, 98; 72, 99; 73, 100; 73, 101; 73, 102; 73, 103; 73, 104; 73, 105; 73, 106; 73, 107; 74, 108; 74, 109; 76, 110; 76, 111; 77, 112; 79, 113; 79, 114; 82, 115; 82, 116; 83, 117; 83, 118; 86, 119; 87, 120; 87, 121; 90, 122; 95, 123; 95, 124; 97, 125; 97, 126; 99, 127; 100, 128; 101, 129; 102, 130; 103, 131; 104, 132; 105, 133; 106, 134; 107, 135; 107, 136; 107, 137; 109, 138; 109, 139; 110, 140; 111, 141; 112, 142; 112, 143; 112, 144; 116, 145; 127, 146; 127, 147; 128, 148; 128, 149; 129, 150; 129, 151; 130, 152; 130, 153; 131, 154; 131, 155; 132, 156; 132, 157; 133, 158; 133, 159; 134, 160; 134, 161; 136, 162; 136, 163; 137, 164; 138, 165; 138, 166; 140, 167; 140, 168; 141, 169; 141, 170; 142, 171; 144, 172; 144, 173; 145, 174; 146, 175; 146, 176; 149, 177; 149, 178; 150, 179; 150, 180; 151, 181; 151, 182; 153, 183; 153, 184; 155, 185; 155, 186; 156, 187; 156, 188; 158, 189; 158, 190; 159, 191; 159, 192; 161, 193; 161, 194; 162, 195; 163, 196; 163, 197; 163, 198; 164, 199; 166, 200; 171, 201; 171, 202; 172, 203; 172, 204; 173, 205; 173, 206; 180, 207; 186, 208; 186, 209; 188, 210; 191, 211; 191, 212; 192, 213; 192, 214; 192, 215; 192, 216; 192, 217; 195, 218; 195, 219; 197, 220; 197, 221; 197, 222; 197, 223; 197, 224; 198, 225; 199, 226; 202, 227; 202, 228; 203, 229; 203, 230; 204, 231; 204, 232; 205, 233; 206, 234; 213, 235; 215, 236; 216, 237; 216, 238; 217, 239; 219, 240; 219, 241; 220, 242; 221, 243; 222, 244; 222, 245; 222, 246; 223, 247; 224, 248; 224, 249; 225, 250; 226, 251; 226, 252; 227, 253; 227, 254; 228, 255; 228, 256; 230, 257; 232, 258; 233, 259; 233, 260; 234, 261; 234, 262; 237, 263; 237, 264; 238, 265; 240, 266; 240, 267; 241, 268; 241, 269; 242, 270; 242, 271; 243, 272; 243, 273; 245, 274; 245, 275; 246, 276; 246, 277; 246, 278; 246, 279; 246, 280; 247, 281; 247, 282; 248, 283; 248, 284; 249, 285; 251, 286; 251, 287; 252, 288; 254, 289; 258, 290; 258, 291; 263, 292; 263, 293; 268, 294; 270, 295; 270, 296; 271, 297; 273, 298; 273, 299; 275, 300; 276, 301; 277, 302; 278, 303; 279, 304; 280, 305; 291, 306; 299, 307; 301, 308; 301, 309; 302, 310; 302, 311; 303, 312; 303, 313; 304, 314; 304, 315; 305, 316; 305, 317; 307, 318; 307, 319; 309, 320; 309, 321; 310, 322; 310, 323; 311, 324; 311, 325; 312, 326; 312, 327; 314, 328; 314, 329; 316, 330; 316, 331; 317, 332; 319, 333; 321, 334; 334, 335; 334, 336 | def search_in_hdx(cls, query='*:*', configuration=None, page_size=1000, **kwargs):
# type: (Optional[str], Optional[Configuration], int, Any) -> List['Dataset']
"""Searches for datasets in HDX
Args:
query (Optional[str]): Query (in Solr format). Defaults to '*:*'.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
page_size (int): Size of page to return. Defaults to 1000.
**kwargs: See below
fq (string): Any filter queries to apply
sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.
rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).
start (int): Offset in the complete result for where the set of returned datasets should begin
facet (string): Whether to enable faceted results. Defaults to True.
facet.mincount (int): Minimum counts for facet fields should be included in the results
facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.
facet.field (List[str]): Fields to facet upon. Default is empty.
use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False.
Returns:
List[Dataset]: list of datasets resulting from query
"""
dataset = Dataset(configuration=configuration)
total_rows = kwargs.get('rows', cls.max_int)
start = kwargs.get('start', 0)
all_datasets = None
attempts = 0
while attempts < cls.max_attempts and all_datasets is None: # if the count values vary for multiple calls, then must redo query
all_datasets = list()
counts = set()
for page in range(total_rows // page_size + 1):
pagetimespagesize = page * page_size
kwargs['start'] = start + pagetimespagesize
rows_left = total_rows - pagetimespagesize
rows = min(rows_left, page_size)
kwargs['rows'] = rows
_, result = dataset._read_from_hdx('dataset', query, 'q', Dataset.actions()['search'], **kwargs)
datasets = list()
if result:
count = result.get('count', None)
if count:
counts.add(count)
no_results = len(result['results'])
for datasetdict in result['results']:
dataset = Dataset(configuration=configuration)
dataset.old_data = dict()
dataset.data = datasetdict
dataset._dataset_create_resources()
datasets.append(dataset)
all_datasets += datasets
if no_results < rows:
break
else:
break
else:
logger.debug(result)
if all_datasets and len(counts) != 1: # Make sure counts are all same for multiple calls to HDX
all_datasets = None
attempts += 1
else:
ids = [dataset['id'] for dataset in all_datasets] # check for duplicates (shouldn't happen)
if len(ids) != len(set(ids)):
all_datasets = None
attempts += 1
if attempts == cls.max_attempts and all_datasets is None:
raise HDXError('Maximum attempts reached for searching for datasets!')
return all_datasets |
0, module; 1, function_definition; 2, function_name:sort_dict; 3, parameters; 4, type; 5, block; 6, typed_parameter; 7, typed_default_parameter; 8, typed_default_parameter; 9, attribute; 10, expression_statement; 11, if_statement; 12, return_statement; 13, identifier:d; 14, type; 15, identifier:by; 16, type; 17, string; 18, identifier:allow_duplicates; 19, type; 20, True; 21, identifier:collections; 22, identifier:OrderedDict; 23, comment:"""
Sort a dictionary by key or value.
The function relies on
https://docs.python.org/3/library/collections.html#collections.OrderedDict .
The duplicates are determined based on
https://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-a-list .
Parameters
----------
d : dict
Input dictionary
by : ['key','value'], optional
By what to sort the input dictionary
allow_duplicates : bool, optional
Flag to indicate if the duplicates are allowed.
Returns
-------
collections.OrderedDict
Sorted dictionary.
>>> sort_dict({2: 3, 1: 2, 3: 1})
OrderedDict([(1, 2), (2, 3), (3, 1)])
>>> sort_dict({2: 3, 1: 2, 3: 1}, by='value')
OrderedDict([(3, 1), (1, 2), (2, 3)])
>>> sort_dict({'2': 3, '1': 2}, by='value')
OrderedDict([('1', 2), ('2', 3)])
>>> sort_dict({2: 1, 1: 2, 3: 1}, by='value', allow_duplicates=False)
Traceback (most recent call last):
...
ValueError: There are duplicates in the values: {1}
>>> sort_dict({1:1,2:3},by=True)
Traceback (most recent call last):
...
ValueError: by can be 'key' or 'value'.
"""; 24, comparison_operator:by == 'key'; 25, block; 26, elif_clause; 27, else_clause; 28, call; 29, identifier:dict; 30, identifier:str; 31, string_content:key; 32, identifier:bool; 33, identifier:by; 34, string; 35, expression_statement; 36, comparison_operator:by == 'value'; 37, block; 38, block; 39, attribute; 40, argument_list; 41, string_content:key; 42, assignment; 43, identifier:by; 44, string; 45, expression_statement; 46, if_statement; 47, expression_statement; 48, raise_statement; 49, identifier:collections; 50, identifier:OrderedDict; 51, call; 52, identifier:i; 53, integer:0; 54, string_content:value; 55, assignment; 56, boolean_operator; 57, block; 58, assignment; 59, call; 60, identifier:sorted; 61, argument_list; 62, identifier:values; 63, call; 64, comparison_operator:len(values) != len(set(values)); 65, not_operator; 66, expression_statement; 67, raise_statement; 68, identifier:i; 69, integer:1; 70, identifier:ValueError; 71, argument_list; 72, call; 73, keyword_argument; 74, identifier:list; 75, argument_list; 76, call; 77, call; 78, identifier:allow_duplicates; 79, assignment; 80, call; 81, string:"by can be 'key' or 'value'."; 82, attribute; 83, argument_list; 84, identifier:key; 85, lambda; 86, call; 87, identifier:len; 88, argument_list; 89, identifier:len; 90, argument_list; 91, identifier:duplicates; 92, call; 93, identifier:ValueError; 94, argument_list; 95, identifier:d; 96, identifier:items; 97, lambda_parameters; 98, subscript; 99, attribute; 100, argument_list; 101, identifier:values; 102, call; 103, identifier:find_duplicates; 104, argument_list; 105, call; 106, identifier:t; 107, identifier:t; 108, identifier:i; 109, identifier:d; 110, identifier:values; 111, identifier:set; 112, argument_list; 113, identifier:values; 114, attribute; 115, argument_list; 116, identifier:values; 117, string:"There are duplicates in the values: {}"; 118, identifier:format; 119, identifier:duplicates | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 3, 8; 4, 9; 5, 10; 5, 11; 5, 12; 6, 13; 6, 14; 7, 15; 7, 16; 7, 17; 8, 18; 8, 19; 8, 20; 9, 21; 9, 22; 10, 23; 11, 24; 11, 25; 11, 26; 11, 27; 12, 28; 14, 29; 16, 30; 17, 31; 19, 32; 24, 33; 24, 34; 25, 35; 26, 36; 26, 37; 27, 38; 28, 39; 28, 40; 34, 41; 35, 42; 36, 43; 36, 44; 37, 45; 37, 46; 37, 47; 38, 48; 39, 49; 39, 50; 40, 51; 42, 52; 42, 53; 44, 54; 45, 55; 46, 56; 46, 57; 47, 58; 48, 59; 51, 60; 51, 61; 55, 62; 55, 63; 56, 64; 56, 65; 57, 66; 57, 67; 58, 68; 58, 69; 59, 70; 59, 71; 61, 72; 61, 73; 63, 74; 63, 75; 64, 76; 64, 77; 65, 78; 66, 79; 67, 80; 71, 81; 72, 82; 72, 83; 73, 84; 73, 85; 75, 86; 76, 87; 76, 88; 77, 89; 77, 90; 79, 91; 79, 92; 80, 93; 80, 94; 82, 95; 82, 96; 85, 97; 85, 98; 86, 99; 86, 100; 88, 101; 90, 102; 92, 103; 92, 104; 94, 105; 97, 106; 98, 107; 98, 108; 99, 109; 99, 110; 102, 111; 102, 112; 104, 113; 105, 114; 105, 115; 112, 116; 114, 117; 114, 118; 115, 119 | def sort_dict(d: dict, by: str = 'key',
allow_duplicates: bool = True) -> collections.OrderedDict:
"""
Sort a dictionary by key or value.
The function relies on
https://docs.python.org/3/library/collections.html#collections.OrderedDict .
The duplicates are determined based on
https://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-a-list .
Parameters
----------
d : dict
Input dictionary
by : ['key','value'], optional
By what to sort the input dictionary
allow_duplicates : bool, optional
Flag to indicate if the duplicates are allowed.
Returns
-------
collections.OrderedDict
Sorted dictionary.
>>> sort_dict({2: 3, 1: 2, 3: 1})
OrderedDict([(1, 2), (2, 3), (3, 1)])
>>> sort_dict({2: 3, 1: 2, 3: 1}, by='value')
OrderedDict([(3, 1), (1, 2), (2, 3)])
>>> sort_dict({'2': 3, '1': 2}, by='value')
OrderedDict([('1', 2), ('2', 3)])
>>> sort_dict({2: 1, 1: 2, 3: 1}, by='value', allow_duplicates=False)
Traceback (most recent call last):
...
ValueError: There are duplicates in the values: {1}
>>> sort_dict({1:1,2:3},by=True)
Traceback (most recent call last):
...
ValueError: by can be 'key' or 'value'.
"""
if by == 'key':
i = 0
elif by == 'value':
values = list(d.values())
if len(values) != len(set(values)) and not allow_duplicates:
duplicates = find_duplicates(values)
raise ValueError("There are duplicates in the values: {}".format(duplicates))
i = 1
else:
raise ValueError("by can be 'key' or 'value'.")
return collections.OrderedDict(sorted(d.items(), key=lambda t: t[i])) |
0, module; 1, function_definition; 2, function_name:get_all_users; 3, parameters; 4, comment:# type: (Optional[Configuration], Any) -> List['User']; 5, block; 6, default_parameter; 7, dictionary_splat_pattern; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, comment:# only for error message if produced; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, return_statement; 16, identifier:configuration; 17, None; 18, identifier:kwargs; 19, comment:"""Get all users in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
q (str): Restrict to names containing a string. Defaults to all users.
order_by (str): Field by which to sort - any user field or edits (number_of_edits). Defaults to 'name'.
Returns:
List[User]: List of all users in HDX
"""; 20, assignment; 21, assignment; 22, assignment; 23, assignment; 24, identifier:result; 25, block; 26, else_clause; 27, identifier:users; 28, identifier:user; 29, call; 30, subscript; 31, string; 32, identifier:result; 33, call; 34, identifier:users; 35, call; 36, for_statement; 37, block; 38, identifier:User; 39, argument_list; 40, identifier:user; 41, string; 42, string_content:all users; 43, attribute; 44, argument_list; 45, identifier:list; 46, argument_list; 47, identifier:userdict; 48, identifier:result; 49, block; 50, expression_statement; 51, keyword_argument; 52, string_content:id; 53, identifier:user; 54, identifier:_write_to_hdx; 55, string; 56, identifier:kwargs; 57, string; 58, expression_statement; 59, expression_statement; 60, call; 61, identifier:configuration; 62, identifier:configuration; 63, string_content:list; 64, string_content:id; 65, assignment; 66, call; 67, attribute; 68, argument_list; 69, identifier:user; 70, call; 71, attribute; 72, argument_list; 73, identifier:logger; 74, identifier:debug; 75, identifier:result; 76, identifier:User; 77, argument_list; 78, identifier:users; 79, identifier:append; 80, identifier:user; 81, identifier:userdict; 82, keyword_argument; 83, identifier:configuration; 84, identifier:configuration | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 5, 8; 5, 9; 5, 10; 5, 11; 5, 12; 5, 13; 5, 14; 5, 15; 6, 16; 6, 17; 7, 18; 8, 19; 9, 20; 10, 21; 12, 22; 13, 23; 14, 24; 14, 25; 14, 26; 15, 27; 20, 28; 20, 29; 21, 30; 21, 31; 22, 32; 22, 33; 23, 34; 23, 35; 25, 36; 26, 37; 29, 38; 29, 39; 30, 40; 30, 41; 31, 42; 33, 43; 33, 44; 35, 45; 35, 46; 36, 47; 36, 48; 36, 49; 37, 50; 39, 51; 41, 52; 43, 53; 43, 54; 44, 55; 44, 56; 44, 57; 49, 58; 49, 59; 50, 60; 51, 61; 51, 62; 55, 63; 57, 64; 58, 65; 59, 66; 60, 67; 60, 68; 65, 69; 65, 70; 66, 71; 66, 72; 67, 73; 67, 74; 68, 75; 70, 76; 70, 77; 71, 78; 71, 79; 72, 80; 77, 81; 77, 82; 82, 83; 82, 84 | def get_all_users(configuration=None, **kwargs):
# type: (Optional[Configuration], Any) -> List['User']
"""Get all users in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
q (str): Restrict to names containing a string. Defaults to all users.
order_by (str): Field by which to sort - any user field or edits (number_of_edits). Defaults to 'name'.
Returns:
List[User]: List of all users in HDX
"""
user = User(configuration=configuration)
user['id'] = 'all users' # only for error message if produced
result = user._write_to_hdx('list', kwargs, 'id')
users = list()
if result:
for userdict in result:
user = User(userdict, configuration=configuration)
users.append(user)
else:
logger.debug(result)
return users |
0, module; 1, function_definition; 2, function_name:fromkeys; 3, parameters; 4, comment:# TODO : type: (Iterable, Union[Any, Callable]) -> DictWrapper; 5, comment:# https://github.com/python/mypy/issues/2254; 6, block; 7, identifier:cls; 8, identifier:iterable; 9, default_parameter; 10, expression_statement; 11, if_statement; 12, return_statement; 13, identifier:value; 14, None; 15, comment:"""Create a new d from
Args:
iterable: Iterable containing keys
value: value to associate with each key.
If callable, will be value[key]
Returns: new DictWrapper
Example:
>>> from ww import d
>>> sorted(d.fromkeys('123', value=4).items())
[('1', 4), ('2', 4), ('3', 4)]
>>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items())
[(0, 0), (1, 1), (2, 4)]
"""; 16, not_operator; 17, block; 18, call; 19, call; 20, return_statement; 21, identifier:cls; 22, generator_expression; 23, identifier:callable; 24, argument_list; 25, call; 26, tuple; 27, for_in_clause; 28, identifier:value; 29, identifier:cls; 30, argument_list; 31, identifier:key; 32, call; 33, identifier:key; 34, identifier:iterable; 35, call; 36, identifier:value; 37, argument_list; 38, attribute; 39, argument_list; 40, identifier:key; 41, identifier:dict; 42, identifier:fromkeys; 43, identifier:iterable; 44, identifier:value | 0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 1, 6; 3, 7; 3, 8; 3, 9; 6, 10; 6, 11; 6, 12; 9, 13; 9, 14; 10, 15; 11, 16; 11, 17; 12, 18; 16, 19; 17, 20; 18, 21; 18, 22; 19, 23; 19, 24; 20, 25; 22, 26; 22, 27; 24, 28; 25, 29; 25, 30; 26, 31; 26, 32; 27, 33; 27, 34; 30, 35; 32, 36; 32, 37; 35, 38; 35, 39; 37, 40; 38, 41; 38, 42; 39, 43; 39, 44 | def fromkeys(cls, iterable, value=None):
# TODO : type: (Iterable, Union[Any, Callable]) -> DictWrapper
# https://github.com/python/mypy/issues/2254
"""Create a new d from
Args:
iterable: Iterable containing keys
value: value to associate with each key.
If callable, will be value[key]
Returns: new DictWrapper
Example:
>>> from ww import d
>>> sorted(d.fromkeys('123', value=4).items())
[('1', 4), ('2', 4), ('3', 4)]
>>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items())
[(0, 0), (1, 1), (2, 4)]
"""
if not callable(value):
return cls(dict.fromkeys(iterable, value))
return cls((key, value(key)) for key in iterable) |