def _parse_bands(self, band_input):
"""
Parses class input and verifies band names.
:param band_input: input parameter `bands`
:type band_input: str or list(str) or None
:return: verified list of bands
:rtype: list(str)
"""
        all_bands = (AwsConstants.S2_L1C_BANDS if self.data_source is DataSource.SENTINEL2_L1C
                     else AwsConstants.S2_L2A_BANDS)
if band_input is None:
return all_bands
if isinstance(band_input, str):
band_list = band_input.split(',')
elif isinstance(band_input, list):
band_list = band_input.copy()
else:
raise ValueError('bands parameter must be a list or a string')
band_list = [band.strip().split('.')[0] for band in band_list]
band_list = [band for band in band_list if band != '']
if not set(band_list) <= set(all_bands):
raise ValueError('bands {} must be a subset of {}'.format(band_list, all_bands))
return band_list
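# Illustrative sketch (not part of the original class): the same normalization
# applied standalone so the behaviour is easy to verify. The band names are
# made-up placeholders, not the real AwsConstants values.
def parse_bands_demo(band_input, all_bands=('B01', 'B02', 'B8A')):
    if band_input is None:
        return list(all_bands)
    band_list = band_input.split(',') if isinstance(band_input, str) else list(band_input)
    band_list = [band.strip().split('.')[0] for band in band_list]
    band_list = [band for band in band_list if band != '']
    if not set(band_list) <= set(all_bands):
        raise ValueError('bands {} must be a subset of {}'.format(band_list, all_bands))
    return band_list

assert parse_bands_demo('B01, B02.jp2') == ['B01', 'B02']  # whitespace and extensions stripped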
def get_dataframe(self, tickers,
startDate=None, endDate=None, metric_name=None, frequency='daily'):
""" Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
or from the TiingoClient.list_tickers() method.
Args:
tickers (string/list): One or more unique identifiers for a stock ticker.
startDate (string): Start of ticker range in YYYY-MM-DD format.
endDate (string): End of ticker range in YYYY-MM-DD format.
metric_name (string): Optional parameter specifying metric to be returned for each
ticker. In the event of a single ticker, this is optional and if not specified
all of the available data will be returned. In the event of a list of tickers,
this parameter is required.
frequency (string): Resample frequency (defaults to daily).
"""
valid_columns = ['open', 'high', 'low', 'close', 'volume', 'adjOpen', 'adjHigh', 'adjLow',
'adjClose', 'adjVolume', 'divCash', 'splitFactor']
if metric_name is not None and metric_name not in valid_columns:
raise APIColumnNameError('Valid data items are: ' + str(valid_columns))
params = {
'format': 'json',
'resampleFreq': frequency
}
if startDate:
params['startDate'] = startDate
if endDate:
params['endDate'] = endDate
if pandas_is_installed:
if type(tickers) is str:
stock = tickers
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
if metric_name is not None:
prices = df[metric_name]
prices.index = df['date']
else:
prices = df
prices.index = df['date']
del (prices['date'])
else:
prices = pd.DataFrame()
for stock in tickers:
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
df.index = df['date']
df.rename(index=str, columns={metric_name: stock}, inplace=True)
prices = pd.concat([prices, df[stock]], axis=1)
prices.index = pd.to_datetime(prices.index)
return prices
else:
            error_message = ("Pandas is not installed, but .get_dataframe() was "
                             "called, which requires pandas. In order to install tiingo "
                             "with pandas, reinstall with pandas as an optional dependency.\n"
                             "Install tiingo with pandas dependency: 'pip install tiingo[pandas]'\n"
                             "Alternatively, just install pandas: pip install pandas.")
raise InstallPandasException(error_message)
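# Hedged usage sketch; the client construction and API key below are
# assumptions for illustration, not taken from the snippet above:
#
#   from tiingo import TiingoClient
#   client = TiingoClient({'api_key': '...'})
#   # single ticker: a DataFrame of all columns, indexed by date
#   goog = client.get_dataframe('GOOGL', startDate='2018-01-01', endDate='2018-06-01')
#   # list of tickers: one column per ticker; metric_name is required here
#   closes = client.get_dataframe(['GOOGL', 'AAPL'], metric_name='adjClose')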
def _gam(self):
""" Lorentz factor array
"""
log10gmin = np.log10(self.Eemin / mec2).value
log10gmax = np.log10(self.Eemax / mec2).value
return np.logspace(
log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin))
)
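# Standalone sketch of the same grid construction with plain floats, to make
# the sizing rule visible: the point count scales with the number of decades,
# so the density (nEed points per decade) stays constant. Values are made up.
import numpy as np

def lorentz_factor_grid(gmin, gmax, points_per_decade=100):
    log10gmin, log10gmax = np.log10(gmin), np.log10(gmax)
    return np.logspace(log10gmin, log10gmax,
                       int(points_per_decade * (log10gmax - log10gmin)))

assert len(lorentz_factor_grid(1e2, 1e6, 10)) == 40  # 4 decades * 10 points per decade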
def _gi_build_stub(parent):
"""
Inspect the passed module recursively and build stubs for functions,
classes, etc.
"""
classes = {}
functions = {}
constants = {}
methods = {}
for name in dir(parent):
if name.startswith("__"):
continue
# Check if this is a valid name in python
if not re.match(_identifier_re, name):
continue
try:
obj = getattr(parent, name)
except:
continue
if inspect.isclass(obj):
classes[name] = obj
elif inspect.isfunction(obj) or inspect.isbuiltin(obj):
functions[name] = obj
elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj):
methods[name] = obj
elif (
str(obj).startswith("<flags")
or str(obj).startswith("<enum ")
or str(obj).startswith("<GType ")
or inspect.isdatadescriptor(obj)
):
constants[name] = 0
elif isinstance(obj, (int, str)):
constants[name] = obj
elif callable(obj):
# Fall back to a function for anything callable
functions[name] = obj
else:
# Assume everything else is some manner of constant
constants[name] = 0
ret = ""
if constants:
ret += "# %s constants\n\n" % parent.__name__
for name in sorted(constants):
if name[0].isdigit():
# GDK has some busted constant names like
# Gdk.EventType.2BUTTON_PRESS
continue
val = constants[name]
strval = str(val)
if isinstance(val, str):
strval = '"%s"' % str(val).replace("\\", "\\\\")
ret += "%s = %s\n" % (name, strval)
if ret:
ret += "\n\n"
if functions:
ret += "# %s functions\n\n" % parent.__name__
for name in sorted(functions):
ret += "def %s(*args, **kwargs):\n" % name
ret += " pass\n"
if ret:
ret += "\n\n"
if methods:
ret += "# %s methods\n\n" % parent.__name__
for name in sorted(methods):
ret += "def %s(self, *args, **kwargs):\n" % name
ret += " pass\n"
if ret:
ret += "\n\n"
if classes:
ret += "# %s classes\n\n" % parent.__name__
for name, obj in sorted(classes.items()):
base = "object"
if issubclass(obj, Exception):
base = "Exception"
ret += "class %s(%s):\n" % (name, base)
classret = _gi_build_stub(obj)
if not classret:
classret = "pass\n"
for line in classret.splitlines():
ret += " " + line + "\n"
ret += "\n"
return ret
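# Quick check on a throwaway module (illustrative; the real callers pass
# GObject-introspection modules, and this assumes the surrounding module's
# imports and the _identifier_re global are in scope):
import types

demo = types.ModuleType("demo")
demo.ANSWER = 42
demo.greet = lambda name: "hi " + name

print(_gi_build_stub(demo))
# -> a "# demo constants" section containing "ANSWER = 42", then a
#    "# demo functions" section containing "def greet(*args, **kwargs): pass"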
def geometric_transform(data, mapping="c0,c1", output_shape=None,
                        mode='constant', interpolation="linear"):
"""
Apply an arbitrary geometric transform.
    The given mapping is used to find, for each point in the
    output, the corresponding coordinates in the input. The value of the
    input at those coordinates is determined by the requested
    interpolation scheme.
    Parameters
    ----------
    data : ndarray
        The 2d or 3d input array.
    mapping : str
        Comma-separated coordinate expressions (e.g. "c0,c1") giving, for each
        output coordinate, the corresponding input coordinate.
    output_shape : tuple of int
        Shape of the output array.
    mode : {'constant', 'wrap', 'edge'}
        How coordinates outside the input are handled.
    interpolation : {'linear', 'nearest'}
        Interpolation scheme.
    """
if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
raise ValueError("input data has to be a 2d or 3d array!")
interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
"nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}
mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
"wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
"edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]
}
    if interpolation not in interpolation_defines:
        raise KeyError(
            "interpolation = '%s' not defined, valid: %s" % (interpolation, list(interpolation_defines.keys())))
    if mode not in mode_defines:
        raise KeyError("mode = '%s' not defined, valid: %s" % (mode, list(mode_defines.keys())))
    if data.dtype.type not in cl_buffer_datatype_dict:
        raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))
dtype_defines = ["-D", "DTYPE={type}".format(type=cl_buffer_datatype_dict[data.dtype.type])]
image_functions = {np.float32:"read_imagef",
np.uint8: "read_imageui",
np.uint16: "read_imageui",
np.int32: "read_imagei"}
    image_read_defines = ["-D", "READ_IMAGE=%s" % image_functions[data.dtype.type]]
with open(abspath("kernels/geometric_transform.cl"), "r") as f:
tpl = Template(f.read())
output_shape = tuple(output_shape)
mappings = {"FUNC2": "c1,c0",
"FUNC3": "c2,c1,c0"}
mappings["FUNC%d" % data.ndim] = ",".join(reversed(mapping.split(",")))
rendered = tpl.render(**mappings)
d_im = OCLImage.from_array(data)
res_g = OCLArray.empty(output_shape, data.dtype)
prog = OCLProgram(src_str=rendered,
build_options=interpolation_defines[interpolation] +
mode_defines[mode] + dtype_defines+image_read_defines)
kernel = "geometric_transform{ndim}".format(ndim=data.ndim)
prog.run_kernel(kernel,
output_shape[::-1], None,
d_im, res_g.data)
return res_g.get()
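# Hedged usage sketch (requires a working OpenCL setup; the array contents and
# the flip mapping are arbitrary examples). Each output pixel (c0, c1) samples
# the input at the coordinates given by the mapping expressions:
#
#   import numpy as np
#   data = np.random.uniform(0, 1, (128, 128)).astype(np.float32)
#   flipped = geometric_transform(data, mapping="127 - c0, 127 - c1",
#                                 output_shape=data.shape)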
def format_description(self, description):
"""
No documentation
"""
if not description: return ""
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
        # everything above matches the base formatter's implementation
bits = description.split('\n')
formatted_bits = [
textwrap.fill(bit,
desc_width,
initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
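# Minimal standalone sketch of the same wrapping strategy (optparse-style
# formatters carry width and current_indent on self; here they are arguments):
import textwrap

def format_description_demo(description, width=40, current_indent=4):
    if not description:
        return ""
    indent = " " * current_indent
    return "\n".join(
        textwrap.fill(bit, width - current_indent,
                      initial_indent=indent, subsequent_indent=indent)
        for bit in description.split('\n')) + "\n"

# Unlike the stock formatter, explicit newlines in the input survive, because
# each '\n'-separated chunk is wrapped independently.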
def check_scope(self, token, request):
http_method = request.method
if not hasattr(self, http_method):
raise OAuthError("HTTP method is not recognized")
required_scopes = getattr(self, http_method)
# a None scope means always allowed
if required_scopes is None:
return True
"""
The required scope is either a string or an iterable. If string,
check if it is allowed for our access token otherwise, iterate through
the required_scopes to see which scopes are allowed
"""
# for non iterable types
if isinstance(required_scopes, six.string_types):
if token.allow_scopes(required_scopes.split()):
return [required_scopes]
return []
allowed_scopes = []
try:
for scope in required_scopes:
if token.allow_scopes(scope.split()):
allowed_scopes.append(scope)
        except Exception:
raise Exception('Invalid required scope values')
else:
return allowed_scopes
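# Illustrative call; ScopedResource, the attribute layout, and the token object
# (anything exposing allow_scopes()) are hypothetical, not from the code above:
#
#   class MyEndpoint(ScopedResource):
#       GET = 'read'                # string: all-or-nothing scope check
#       POST = ['write', 'admin']   # iterable: returns the allowed subset
#
#   allowed = MyEndpoint().check_scope(token, request)  # e.g. ['write']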
def timid(ctxt, test, key=None, check=False, exts=None):
"""
Execute a test described by a YAML file.
:param ctxt: A ``timid.context.Context`` object.
:param test: The name of a YAML file containing the test
description. Note that the current working directory
set up in ``ctxt.environment`` does not affect the
resolution of this file.
:param key: An optional key into the test description file. If
not ``None``, the file named by ``test`` must be a
YAML dictionary of lists of steps; otherwise, it must
be a simple list of steps.
:param check: If ``True``, only performs a syntax check of the
test steps indicated by ``test`` and ``key``; the
test itself is not run.
:param exts: An instance of ``timid.extensions.ExtensionSet``
describing the extensions to be called while
processing the test steps.
"""
# Normalize the extension set
if exts is None:
exts = extensions.ExtensionSet()
# Begin by reading the steps and adding them to the list in the
# context (which may already have elements thanks to the
# extensions)
ctxt.emit('Reading test steps from %s%s...' %
(test, '[%s]' % key if key else ''), debug=True)
ctxt.steps += exts.read_steps(ctxt, steps.Step.parse_file(ctxt, test, key))
# If all we were supposed to do was check, well, we've
# accomplished that...
if check:
return None
# Now we execute each step in turn
for idx, step in enumerate(ctxt.steps):
# Emit information about what we're doing
ctxt.emit('[Step %d]: %s . . .' % (idx, step.name))
# Run through extension hooks
if exts.pre_step(ctxt, step, idx):
ctxt.emit('[Step %d]: `- Step %s' %
(idx, steps.states[steps.SKIPPED]))
continue
# Now execute the step
result = step(ctxt)
# Let the extensions process the result of the step
exts.post_step(ctxt, step, idx, result)
# Emit the result
ctxt.emit('[Step %d]: `- Step %s%s' %
(idx, steps.states[result.state],
' (ignored)' if result.ignore else ''))
# Was the step a success?
if not result:
msg = 'Test step failure'
if result.msg:
msg += ': %s' % result.msg
return msg
# All done! And a success, to boot...
return None
def allreduce(self, x, mesh_axes, reduction_fn_string):
"""Grouped allreduce, (across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers - the mesh dimensions to be reduced
reduction_fn_string: "SUM" or "MAX"
Returns:
a LaidOutTensor
"""
return self._collective_with_groups(
x, mesh_axes, functools.partial(
allreduce_ring, reduction_fn_string=reduction_fn_string))
def getComponentStateForDevicePath(self, pchRenderModelName, pchComponentName, devicePath):
"""
Use this to query information about the component, as a function of the controller state.
        * For dynamic controller components (e.g. trigger), values will reflect component motions;
          for static components this will return a consistent value independent of the VRControllerState_t.
        * If pchRenderModelName or pchComponentName is invalid, this will return false (and transforms
          will be set to identity). Otherwise, it returns true.
        Note: for dynamic objects, visibility may be dynamic (i.e., true/false will be returned based on
        controller state and controller mode state).
"""
fn = self.function_table.getComponentStateForDevicePath
pState = RenderModel_ControllerMode_State_t()
pComponentState = RenderModel_ComponentState_t()
result = fn(pchRenderModelName, pchComponentName, devicePath, byref(pState), byref(pComponentState))
return result, pState, pComponentState
def get_stp_mst_detail_output_msti_port_spanningtree_enabled(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
spanningtree_enabled = ET.SubElement(port, "spanningtree-enabled")
spanningtree_enabled.text = kwargs.pop('spanningtree_enabled')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def fbool(value):
"""boolean"""
if isinstance(value, str_types):
value = value.lower()
if value == "false":
value = False
elif value == "true":
value = True
elif value:
value = bool(float(value))
else:
raise ValueError("empty string")
else:
value = bool(float(value))
return value
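# Behaviour at a glance; each case follows directly from the code above.
assert fbool("TRUE") is True    # case-insensitive keyword
assert fbool("0.0") is False    # other strings go through float()
assert fbool("2.5") is True
assert fbool(0) is False        # non-strings: bool(float(value))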
def bins(start, stop, fmt='gff', one=True):
"""
Uses the definition of a "genomic bin" described in Fig 7 of
http://genome.cshlp.org/content/12/6/996.abstract.
Parameters
----------
one : boolean
If `one=True` (default), then only return the smallest bin that
completely contains these coordinates (useful for assigning a single
bin).
If `one=False`, then return the set of *all* bins that overlap these
coordinates (useful for looking for features that could intersect)
fmt : 'gff' | 'bed'
This specifies 1-based start coords (gff) or 0-based start coords (bed)
"""
# For very large coordinates, return 1 which is "somewhere on the
# chromosome".
if start >= MAX_CHROM_SIZE or stop >= MAX_CHROM_SIZE:
if one:
return 1
else:
return set([1])
# Jump to highest resolution bin that will fit these coords (depending on
# whether we have a BED or GFF-style coordinate).
#
# Some GFF files include negative coords, which will throw off this
# calculation. If negative coords, then set the bin to the largest
# possible.
if start < 0:
if one:
return 1
else:
return set([1])
if stop < 0:
if one:
return 1
else:
return set([1])
start = (start - COORD_OFFSETS[fmt]) >> FIRST_SHIFT
stop = (stop) >> FIRST_SHIFT
# We always at least fit within the chrom, which is bin 1.
bins = set([1])
for offset in OFFSETS:
# Since we're going from smallest to largest bins, the first one where
# the feature's start and stop positions are both within the same bin
# is the smallest one these coords fit within.
if one:
if start == stop:
# Note that at this point, because of the bit-shifting, `start`
# is the number of bins (at this current level). So we need to
# add it to `offset` to get the actual bin ID.
return offset + start
# See the Fig 7 reproduction above to see why range().
bins.update(list(range(offset + start, offset + stop + 1)))
# Move to the next level (8x larger bin size; i.e., 2**NEXT_SHIFT
# larger bin size)
start >>= NEXT_SHIFT
stop >>= NEXT_SHIFT
return bins
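# Self-contained check using the conventional UCSC constants (assumed here;
# the module-level values in the original may differ):
MAX_CHROM_SIZE = 2 ** 29
FIRST_SHIFT = 17                     # finest bins are 2**17 = 128 kb wide
NEXT_SHIFT = 3                       # each level is 8x coarser
OFFSETS = [4681, 585, 73, 9, 1]      # finest-level bin offsets first
COORD_OFFSETS = {'gff': 1, 'bed': 0}

assert bins(10000, 20000) == 4681              # both ends fall in finest bin 0
assert bins(0, 2 ** 17 + 1, fmt='bed') == 585  # spans two finest bins, so the
                                               # smallest containing bin is one level up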
def JsonResponseModel(self):
"""In this context, return raw JSON instead of proto."""
old_model = self.response_type_model
self.__response_type_model = 'json'
yield
self.__response_type_model = old_model
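# The method above is a generator used as a context manager (in the original it
# is presumably decorated with contextlib.contextmanager). A minimal standalone
# version of the same save/override/restore pattern; the try/finally here is an
# addition so the old value is restored even if the body raises:
import contextlib

class DemoClient(object):
    response_type_model = 'proto'

    @contextlib.contextmanager
    def JsonResponseModel(self):
        old_model = self.response_type_model
        self.response_type_model = 'json'
        try:
            yield
        finally:
            self.response_type_model = old_model

client = DemoClient()
with client.JsonResponseModel():
    assert client.response_type_model == 'json'
assert client.response_type_model == 'proto'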
def select(self, condition, name=''):
"""
Selects nuclei according to a condition on Z,N or M
Parameters
----------
condition : function,
Can have one of the signatures f(M), f(Z,N) or f(Z, N, M)
must return a boolean value
name: string, optional name for the resulting Table
Example:
--------
Select all nuclei with A > 160:
>>> A_gt_160 = lambda Z,N: Z + N > 160
>>> Table('AME2003').select(A_gt_160)
"""
        # func_code is Python 2 only; __code__ works on both 2.6+ and 3
        if condition.__code__.co_argcount == 1:
            idx = [(Z, N) for (Z, N), M in self if condition(M)]
        if condition.__code__.co_argcount == 2:
            idx = [(Z, N) for (Z, N) in self.index if condition(Z, N)]
        if condition.__code__.co_argcount == 3:
            idx = [(Z, N) for (Z, N), M in self if condition(Z, N, M)]
        index = pd.MultiIndex.from_tuples(idx, names=['Z', 'N'])
        return Table(df=self.df.loc[index], name=name)
def suggest(self, history, searchspace):
"""
Suggest params to maximize an objective function based on the
function evaluation history using a tree of Parzen estimators (TPE),
as implemented in the hyperopt package.
Use of this function requires that hyperopt be installed.
"""
# This function is very odd, because as far as I can tell there's
# no real documented API for any of the internals of hyperopt. Its
# execution model is that hyperopt calls your objective function
# (instead of merely providing you with suggested points, and then
        # you calling the function yourself), and it's very tricky (for me)
# to use the internal hyperopt data structures to get these predictions
# out directly.
        # So the path we take in this function is to construct a synthetic
        # hyperopt.Trials database from the `history`, and then call
        # hyperopt.fmin with a dummy objective function that logs the value
# used, and then return that value to our client.
# The form of the hyperopt.Trials database isn't really documented in
# the code -- most of this comes from reverse engineering it, by
# running fmin() on a simple function and then inspecting the form of
# the resulting trials object.
if 'hyperopt' not in sys.modules:
raise ImportError('No module named hyperopt')
random = check_random_state(self.seed)
hp_searchspace = searchspace.to_hyperopt()
trials = Trials()
for i, (params, scores, status) in enumerate(history):
if status == 'SUCCEEDED':
# we're doing maximization, hyperopt.fmin() does minimization,
# so we need to swap the sign
result = {'loss': -np.mean(scores), 'status': STATUS_OK}
elif status == 'PENDING':
result = {'status': STATUS_RUNNING}
elif status == 'FAILED':
result = {'status': STATUS_FAIL}
else:
raise RuntimeError('unrecognized status: %s' % status)
# the vals key in the trials dict is basically just the params
# dict, but enum variables (hyperopt hp.choice() nodes) are
# different, because the index of the parameter is specified
# in vals, not the parameter itself.
vals = {}
for var in searchspace:
if isinstance(var, EnumVariable):
# get the index in the choices of the parameter, and use
# that.
matches = [
i for i, c in enumerate(var.choices)
if c == params[var.name]
]
assert len(matches) == 1
vals[var.name] = matches
else:
# the other big difference is that all of the param values
# are wrapped in length-1 lists.
vals[var.name] = [params[var.name]]
trials.insert_trial_doc({
'misc': {
'cmd': ('domain_attachment', 'FMinIter_Domain'),
'idxs': dict((k, [i]) for k in hp_searchspace.keys()),
'tid': i,
'vals': vals,
'workdir': None
},
'result': result,
'tid': i,
# bunch of fixed fields that hyperopt seems to require
'owner': None,
'spec': None,
'state': 2,
'book_time': None,
'exp_key': None,
'refresh_time': None,
'version': 0
})
trials.refresh()
chosen_params_container = []
        def suggest(*args, **kwargs):
            # keyword arguments after **kwargs are py3-only syntax, so inject
            # gamma and n_startup_jobs into kwargs for py2 compatibility
            kwargs['gamma'] = self.gamma
            kwargs['n_startup_jobs'] = self.seeds
            return tpe.suggest(*args, **kwargs)
def mock_fn(x):
# http://stackoverflow.com/a/3190783/1079728
            # to get around the missing nonlocal keyword in python2
chosen_params_container.append(x)
return 0
fmin(fn=mock_fn,
             algo=suggest,  # the wrapper above, so gamma/n_startup_jobs take effect
space=hp_searchspace,
trials=trials,
max_evals=len(trials.trials) + 1,
**self._hyperopt_fmin_random_kwarg(random))
chosen_params = chosen_params_container[0]
return chosen_params
def live_unread_notification_list(request):
''' Return a json with a unread notification list '''
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'unread_count': 0,
'unread_list': []
}
return JsonResponse(data)
default_num_to_fetch = get_config()['NUM_TO_FETCH']
try:
        # If they don't specify, fall back to the configured default.
num_to_fetch = request.GET.get('max', default_num_to_fetch)
num_to_fetch = int(num_to_fetch)
if not (1 <= num_to_fetch <= 100):
num_to_fetch = default_num_to_fetch
except ValueError: # If casting to an int fails.
num_to_fetch = default_num_to_fetch
unread_list = []
for notification in request.user.notifications.unread()[0:num_to_fetch]:
struct = model_to_dict(notification)
struct['slug'] = id2slug(notification.id)
if notification.actor:
struct['actor'] = str(notification.actor)
if notification.target:
struct['target'] = str(notification.target)
if notification.action_object:
struct['action_object'] = str(notification.action_object)
if notification.data:
struct['data'] = notification.data
unread_list.append(struct)
if request.GET.get('mark_as_read'):
notification.mark_as_read()
data = {
'unread_count': request.user.notifications.unread().count(),
'unread_list': unread_list
}
return JsonResponse(data)
def analyze_bash_vars(job_input_file, job_homedir):
'''
This function examines the input file, and calculates variables to
instantiate in the shell environment. It is called right before starting the
execution of an app in a worker.
For each input key, we want to have
$var
$var_filename
$var_prefix
remove last dot (+gz), and/or remove patterns
$var_path
$HOME/in/var/$var_filename
For example,
$HOME/in/genes/A.txt
B.txt
export genes=('{"$dnanexus_link": "file-xxxx"}' '{"$dnanexus_link": "file-yyyy"}')
export genes_filename=("A.txt" "B.txt")
export genes_prefix=("A" "B")
export genes_path=("$HOME/in/genes/A.txt" "$HOME/in/genes/B.txt")
If there are patterns defined in the input spec, then the prefix respects them.
Here are several examples, where the patterns are:
*.bam, *.bwa-index.tar.gz, foo*.sam, z*ra.sam
file name prefix matches
foo.zed.bam foo.zed *.bam
xxx.bwa-index.tar.gz xxx *.bwa-index.tar.gz
food.sam food foo*.sam
zebra.sam zebra z*ra.sam
xx.c xx
xx.c.gz xx
The only patterns we recognize are of the form x*.y. For example:
legal *.sam, *.c.py, foo*.sam, a*b*c.baz
ignored uu.txt x???.tar mon[a-z].py
'''
_, file_entries, rest_hash = get_job_input_filenames(job_input_file)
patterns_dict = get_input_spec_patterns()
# Note: there may be multiple matches, choose the shortest prefix.
def get_prefix(basename, key):
best_prefix = None
patterns = patterns_dict.get(key)
if patterns is not None:
for pattern in patterns:
if fnmatch.fnmatch(basename, pattern):
_, _, right_piece = pattern.rpartition("*")
best_prefix = choose_shorter_string(best_prefix, basename[:-len(right_piece)])
if best_prefix is not None:
return best_prefix
else:
# no matching rule
parts = os.path.splitext(basename)
if parts[1] == ".gz":
parts = os.path.splitext(parts[0])
return parts[0]
def factory():
return {'handler': [], 'basename': [], 'prefix': [], 'path': []}
file_key_descs = collections.defaultdict(factory)
rel_home_dir = get_input_dir(job_homedir)
for key, entries in list(file_entries.items()):
for entry in entries:
filename = entry['trg_fname']
basename = os.path.basename(filename)
prefix = get_prefix(basename, key)
k_desc = file_key_descs[key]
k_desc['handler'].append(entry['handler'])
k_desc['basename'].append(basename)
k_desc['prefix'].append(prefix)
k_desc['path'].append(os.path.join(rel_home_dir, filename))
return file_key_descs, rest_hash
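# The prefix rule can be exercised in isolation; this standalone helper
# reproduces get_prefix() (with choose_shorter_string inlined as "keep the
# shorter candidate") against cases from the docstring's table:
import fnmatch
import os

def prefix_demo(basename, patterns):
    best = None
    for pattern in patterns:
        if fnmatch.fnmatch(basename, pattern):
            _, _, right = pattern.rpartition("*")
            candidate = basename[:-len(right)]
            if best is None or len(candidate) < len(best):
                best = candidate
    if best is not None:
        return best
    parts = os.path.splitext(basename)    # no matching rule: strip the
    if parts[1] == ".gz":                 # extension (twice for .gz)
        parts = os.path.splitext(parts[0])
    return parts[0]

patterns = ["*.bam", "*.bwa-index.tar.gz", "foo*.sam"]
assert prefix_demo("foo.zed.bam", patterns) == "foo.zed"
assert prefix_demo("xxx.bwa-index.tar.gz", patterns) == "xxx"
assert prefix_demo("food.sam", patterns) == "food"
assert prefix_demo("xx.c.gz", patterns) == "xx"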
def _wakeup(self):
'''
issue wakeup command to device to take out of standby mode.
'''
log.info("send: WAKEUP")
for i in xrange(3):
self.port.write('\n') # wakeup device
ack = self.port.read(len(self.WAKE_ACK)) # read wakeup string
log_raw('read', ack)
if ack == self.WAKE_ACK:
return
raise NoDeviceException('Can not access weather station')
def choose(self, choose_from):
"""given a mapping of implementations
choose one based on the current settings
returns a key value pair
"""
for choice in self.elements:
if choice in choose_from:
return ImplementationChoice(choice, choose_from[choice])
raise LookupError(self.elements, choose_from.keys())
def create_page(name, parent_id, space, content):
"""Create a page in Confluence.
Parameters:
- name: name of the Confluence page to create.
- parent_id: ID of the intended parent of the page.
- space: key of the space where the page will be created.
- content: XHTML content to be written to the page.
Notes: the page id can be obtained by getting ["id"] from the returned JSON.
"""
data = {}
data["type"] = "page"
data["title"] = name
data["ancestors"] = [{"id": str(parent_id)}]
data["space"] = {"key": space}
data["body"] = {"storage": {"value": content, "representation": "storage"}}
return _api.rest("/", "POST", _json.dumps(data))
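# Hedged usage sketch; the space key, parent id, and content below are
# placeholders (per the note above, the page id comes from the response JSON):
#
#   body = "<p>Release notes for build 1234</p>"
#   page = create_page("Build 1234", parent_id=98765, space="ENG", content=body)
#   page_id = page["id"]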
def _deserialize(self, response):
"""Attempt to deserialize resource from response.
:param requests.Response response: latest REST call response.
"""
# Hacking response with initial status_code
previous_status = response.status_code
response.status_code = self.initial_status_code
resource = self.get_outputs(response)
response.status_code = previous_status
# Hack for Storage or SQL, to workaround the bug in the Python generator
if resource is None:
previous_status = response.status_code
for status_code_to_test in [200, 201]:
try:
response.status_code = status_code_to_test
resource = self.get_outputs(response)
except ClientException:
pass
else:
return resource
finally:
response.status_code = previous_status
return resource
|
[
"def",
"_deserialize",
"(",
"self",
",",
"response",
")",
":",
"# Hacking response with initial status_code",
"previous_status",
"=",
"response",
".",
"status_code",
"response",
".",
"status_code",
"=",
"self",
".",
"initial_status_code",
"resource",
"=",
"self",
".",
"get_outputs",
"(",
"response",
")",
"response",
".",
"status_code",
"=",
"previous_status",
"# Hack for Storage or SQL, to workaround the bug in the Python generator",
"if",
"resource",
"is",
"None",
":",
"previous_status",
"=",
"response",
".",
"status_code",
"for",
"status_code_to_test",
"in",
"[",
"200",
",",
"201",
"]",
":",
"try",
":",
"response",
".",
"status_code",
"=",
"status_code_to_test",
"resource",
"=",
"self",
".",
"get_outputs",
"(",
"response",
")",
"except",
"ClientException",
":",
"pass",
"else",
":",
"return",
"resource",
"finally",
":",
"response",
".",
"status_code",
"=",
"previous_status",
"return",
"resource"
] | 39.36 | 15.28 |
def change(connect_spec, dn, before, after):
'''Modify an entry in an LDAP database.
This does the same thing as :py:func:`modify`, but with a simpler
interface. Instead of taking a list of directives, it takes a
before and after view of an entry, determines the differences
between the two, computes the directives, and executes them.
Any attribute value present in ``before`` but missing in ``after``
is deleted. Any attribute value present in ``after`` but missing
in ``before`` is added. Any attribute value in the database that
is not mentioned in either ``before`` or ``after`` is not altered.
Any attribute value that is present in both ``before`` and
``after`` is ignored, regardless of whether that attribute value
exists in the database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param before:
The expected state of the entry before modification. This is
a dict mapping each attribute name to an iterable of values.
:param after:
The desired state of the entry after modification. This is a
dict mapping each attribute name to an iterable of values.
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.change "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret'}
}" dn='cn=admin,dc=example,dc=com'
before="{'example_value': 'before_val'}"
after="{'example_value': 'after_val'}"
'''
l = connect(connect_spec)
# convert the "iterable of values" to lists in case that's what
# modifyModlist() expects (also to ensure that the caller's dicts
# are not modified)
before = dict(((attr, salt.utils.data.encode(list(vals)))
for attr, vals in six.iteritems(before)))
after = dict(((attr, salt.utils.data.encode(list(vals)))
for attr, vals in six.iteritems(after)))
if 'unicodePwd' in after:
after['unicodePwd'] = [_format_unicode_password(x) for x in after['unicodePwd']]
modlist = ldap.modlist.modifyModlist(before, after)
try:
l.c.modify_s(dn, modlist)
except ldap.LDAPError as e:
_convert_exception(e)
return True
|
[
"def",
"change",
"(",
"connect_spec",
",",
"dn",
",",
"before",
",",
"after",
")",
":",
"l",
"=",
"connect",
"(",
"connect_spec",
")",
"# convert the \"iterable of values\" to lists in case that's what",
"# modifyModlist() expects (also to ensure that the caller's dicts",
"# are not modified)",
"before",
"=",
"dict",
"(",
"(",
"(",
"attr",
",",
"salt",
".",
"utils",
".",
"data",
".",
"encode",
"(",
"list",
"(",
"vals",
")",
")",
")",
"for",
"attr",
",",
"vals",
"in",
"six",
".",
"iteritems",
"(",
"before",
")",
")",
")",
"after",
"=",
"dict",
"(",
"(",
"(",
"attr",
",",
"salt",
".",
"utils",
".",
"data",
".",
"encode",
"(",
"list",
"(",
"vals",
")",
")",
")",
"for",
"attr",
",",
"vals",
"in",
"six",
".",
"iteritems",
"(",
"after",
")",
")",
")",
"if",
"'unicodePwd'",
"in",
"after",
":",
"after",
"[",
"'unicodePwd'",
"]",
"=",
"[",
"_format_unicode_password",
"(",
"x",
")",
"for",
"x",
"in",
"after",
"[",
"'unicodePwd'",
"]",
"]",
"modlist",
"=",
"ldap",
".",
"modlist",
".",
"modifyModlist",
"(",
"before",
",",
"after",
")",
"try",
":",
"l",
".",
"c",
".",
"modify_s",
"(",
"dn",
",",
"modlist",
")",
"except",
"ldap",
".",
"LDAPError",
"as",
"e",
":",
"_convert_exception",
"(",
"e",
")",
"return",
"True"
] | 36.530303 | 23.530303 |
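The diff computation at the heart of this function can be tried in isolation; this sketch assumes the python-ldap package is installed, and the exact modlist output may vary by version:

import ldap.modlist

before = {'mail': [b'old@example.com']}
after = {'mail': [b'new@example.com']}
# modifyModlist computes the delete/add directives needed to turn
# `before` into `after`; unchanged attributes produce no directives.
print(ldap.modlist.modifyModlist(before, after))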
def main(args=None):
"""Main function."""
parser = get_parser()
args = parser.parse_args(args=args)
Logger.set_level(args.level)
colorama_args = {'autoreset': True}
if args.no_color:
colorama_args['strip'] = True
colorama.init(**colorama_args)
config = None
if args.no_config:
logger.info('--no-config flag used, use default configuration')
if args.input_file:
logger.info('Input file specified: %s' % args.input_file)
file_path = args.input_file
else:
logger.info('No input file specified, will read standard input')
file_path = sys.stdin
config = Config.default_config(file_path)
else:
if args.config_file:
logger.info('Configuration file specified: %s' % args.config_file)
config_file = args.config_file
else:
logger.info('No configuration file specified, searching')
config_file = Config.find()
if config_file:
logger.info('Load configuration from %s' % config_file)
config = Config.from_file(config_file)
if config is None:
logger.info('No configuration file found, use default one')
config = Config.default_config()
logger.debug('Configuration = {}'.format(config))
logger.debug('Plugins loaded = {}'.format(config.plugins))
if args.list_plugins:
logger.info('Print list of plugins')
config.print_plugins()
return 0
logger.info('Run analysis')
analysis = Analysis(config)
try:
analysis.run(verbose=False)
logger.info('Analysis successful: %s' % analysis.successful)
logger.info('Output results as TAP')
analysis.output_tap()
return 0 if analysis.successful else 1
except KeyboardInterrupt:
logger.info('Keyboard interruption, aborting')
return 130
|
[
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"parser",
"=",
"get_parser",
"(",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
"=",
"args",
")",
"Logger",
".",
"set_level",
"(",
"args",
".",
"level",
")",
"colorama_args",
"=",
"{",
"'autoreset'",
":",
"True",
"}",
"if",
"args",
".",
"no_color",
":",
"colorama_args",
"[",
"'strip'",
"]",
"=",
"True",
"colorama",
".",
"init",
"(",
"*",
"*",
"colorama_args",
")",
"config",
"=",
"None",
"if",
"args",
".",
"no_config",
":",
"logger",
".",
"info",
"(",
"'--no-config flag used, use default configuration'",
")",
"if",
"args",
".",
"input_file",
":",
"logger",
".",
"info",
"(",
"'Input file specified: %s'",
"%",
"args",
".",
"input_file",
")",
"file_path",
"=",
"args",
".",
"input_file",
"else",
":",
"logger",
".",
"info",
"(",
"'No input file specified, will read standard input'",
")",
"file_path",
"=",
"sys",
".",
"stdin",
"config",
"=",
"Config",
".",
"default_config",
"(",
"file_path",
")",
"else",
":",
"if",
"args",
".",
"config_file",
":",
"logger",
".",
"info",
"(",
"'Configuration file specified: %s'",
"%",
"args",
".",
"config_file",
")",
"config_file",
"=",
"args",
".",
"config_file",
"else",
":",
"logger",
".",
"info",
"(",
"'No configuration file specified, searching'",
")",
"config_file",
"=",
"Config",
".",
"find",
"(",
")",
"if",
"config_file",
":",
"logger",
".",
"info",
"(",
"'Load configuration from %s'",
"%",
"config_file",
")",
"config",
"=",
"Config",
".",
"from_file",
"(",
"config_file",
")",
"if",
"config",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'No configuration file found, use default one'",
")",
"config",
"=",
"Config",
".",
"default_config",
"(",
")",
"logger",
".",
"debug",
"(",
"'Configuration = {}'",
".",
"format",
"(",
"config",
")",
")",
"logger",
".",
"debug",
"(",
"'Plugins loaded = {}'",
".",
"format",
"(",
"config",
".",
"plugins",
")",
")",
"if",
"args",
".",
"list_plugins",
":",
"logger",
".",
"info",
"(",
"'Print list of plugins'",
")",
"config",
".",
"print_plugins",
"(",
")",
"return",
"0",
"logger",
".",
"info",
"(",
"'Run analysis'",
")",
"analysis",
"=",
"Analysis",
"(",
"config",
")",
"try",
":",
"analysis",
".",
"run",
"(",
"verbose",
"=",
"False",
")",
"logger",
".",
"info",
"(",
"'Analysis successful: %s'",
"%",
"analysis",
".",
"successful",
")",
"logger",
".",
"info",
"(",
"'Output results as TAP'",
")",
"analysis",
".",
"output_tap",
"(",
")",
"return",
"0",
"if",
"analysis",
".",
"successful",
"else",
"1",
"except",
"KeyboardInterrupt",
":",
"logger",
".",
"info",
"(",
"'Keyboard interruption, aborting'",
")",
"return",
"130"
] | 34.611111 | 17.574074 |
def patch_ironic_ramdisk(self):
"""Clean the disk before flushing the new image.
See: https://bugs.launchpad.net/ironic-lib/+bug/1550604
"""
tmpdir = self.run('mktemp -d')[0].rstrip('\n')
self.run('cd {tmpdir}; zcat /home/stack/ironic-python-agent.initramfs| cpio -id'.format(tmpdir=tmpdir))
self.send_file(pkg_data_filename('static', 'ironic-wipefs.patch'), '/tmp/ironic-wipefs.patch')
self.run('cd {tmpdir}; patch -p0 < /tmp/ironic-wipefs.patch'.format(tmpdir=tmpdir))
self.run('cd {tmpdir}; find . | cpio --create --format=newc > /home/stack/ironic-python-agent.initramfs'.format(tmpdir=tmpdir))
|
[
"def",
"patch_ironic_ramdisk",
"(",
"self",
")",
":",
"tmpdir",
"=",
"self",
".",
"run",
"(",
"'mktemp -d'",
")",
"[",
"0",
"]",
".",
"rstrip",
"(",
"'\\n'",
")",
"self",
".",
"run",
"(",
"'cd {tmpdir}; zcat /home/stack/ironic-python-agent.initramfs| cpio -id'",
".",
"format",
"(",
"tmpdir",
"=",
"tmpdir",
")",
")",
"self",
".",
"send_file",
"(",
"pkg_data_filename",
"(",
"'static'",
",",
"'ironic-wipefs.patch'",
")",
",",
"'/tmp/ironic-wipefs.patch'",
")",
"self",
".",
"run",
"(",
"'cd {tmpdir}; patch -p0 < /tmp/ironic-wipefs.patch'",
".",
"format",
"(",
"tmpdir",
"=",
"tmpdir",
")",
")",
"self",
".",
"run",
"(",
"'cd {tmpdir}; find . | cpio --create --format=newc > /home/stack/ironic-python-agent.initramfs'",
".",
"format",
"(",
"tmpdir",
"=",
"tmpdir",
")",
")"
] | 65.4 | 36.5 |
def is_global(pe_pe):
'''
Check if a PE_PE is globally defined, i.e. not inside a C_C
'''
if type(pe_pe).__name__ != 'PE_PE':
pe_pe = one(pe_pe).PE_PE[8001]()
if one(pe_pe).C_C[8003]():
return False
pe_pe = one(pe_pe).EP_PKG[8000].PE_PE[8001]()
if not pe_pe:
return True
return is_global(pe_pe)
|
[
"def",
"is_global",
"(",
"pe_pe",
")",
":",
"if",
"type",
"(",
"pe_pe",
")",
".",
"__name__",
"!=",
"'PE_PE'",
":",
"pe_pe",
"=",
"one",
"(",
"pe_pe",
")",
".",
"PE_PE",
"[",
"8001",
"]",
"(",
")",
"if",
"one",
"(",
"pe_pe",
")",
".",
"C_C",
"[",
"8003",
"]",
"(",
")",
":",
"return",
"False",
"pe_pe",
"=",
"one",
"(",
"pe_pe",
")",
".",
"EP_PKG",
"[",
"8000",
"]",
".",
"PE_PE",
"[",
"8001",
"]",
"(",
")",
"if",
"not",
"pe_pe",
":",
"return",
"True",
"return",
"is_global",
"(",
"pe_pe",
")"
] | 23.4 | 20.866667 |
def _close(self):
""" Close connection to remote host. """
if self._process is None:
return
self.quit()
self._process.stdin.close()
logger.debug("Waiting for ssh process to finish...")
self._process.wait() # Wait for ssh session to finish.
# self._process.terminate()
# self._process.kill()
self._process = None
|
[
"def",
"_close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_process",
"is",
"None",
":",
"return",
"self",
".",
"quit",
"(",
")",
"self",
".",
"_process",
".",
"stdin",
".",
"close",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Waiting for ssh process to finish...\"",
")",
"self",
".",
"_process",
".",
"wait",
"(",
")",
"# Wait for ssh session to finish.",
"# self._process.terminate()",
"# self._process.kill()",
"self",
".",
"_process",
"=",
"None"
] | 24.125 | 21.75 |
def _add_numeric_operations(cls):
"""
Add numeric operations to the GroupBy generically.
"""
def groupby_function(name, alias, npfunc,
numeric_only=True, _convert=False,
min_count=-1):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_common_see_also)
@Appender(_local_template)
def f(self, **kwargs):
if 'numeric_only' not in kwargs:
kwargs['numeric_only'] = numeric_only
if 'min_count' not in kwargs:
kwargs['min_count'] = min_count
self._set_group_selection()
try:
return self._cython_agg_general(
alias, alt=npfunc, **kwargs)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(
lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
set_function_name(f, name, cls)
return f
def first_compat(x, axis=0):
def first(x):
x = x.to_numpy()
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(first, axis=axis)
else:
return first(x)
def last_compat(x, axis=0):
def last(x):
x = x.to_numpy()
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(last, axis=axis)
else:
return last(x)
cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)
cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)
cls.min = groupby_function('min', 'min', np.min, numeric_only=False)
cls.max = groupby_function('max', 'max', np.max, numeric_only=False)
cls.first = groupby_function('first', 'first', first_compat,
numeric_only=False)
cls.last = groupby_function('last', 'last', last_compat,
numeric_only=False)
|
[
"def",
"_add_numeric_operations",
"(",
"cls",
")",
":",
"def",
"groupby_function",
"(",
"name",
",",
"alias",
",",
"npfunc",
",",
"numeric_only",
"=",
"True",
",",
"_convert",
"=",
"False",
",",
"min_count",
"=",
"-",
"1",
")",
":",
"_local_template",
"=",
"\"Compute %(f)s of group values\"",
"@",
"Substitution",
"(",
"name",
"=",
"'groupby'",
",",
"f",
"=",
"name",
")",
"@",
"Appender",
"(",
"_common_see_also",
")",
"@",
"Appender",
"(",
"_local_template",
")",
"def",
"f",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'numeric_only'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'numeric_only'",
"]",
"=",
"numeric_only",
"if",
"'min_count'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'min_count'",
"]",
"=",
"min_count",
"self",
".",
"_set_group_selection",
"(",
")",
"try",
":",
"return",
"self",
".",
"_cython_agg_general",
"(",
"alias",
",",
"alt",
"=",
"npfunc",
",",
"*",
"*",
"kwargs",
")",
"except",
"AssertionError",
"as",
"e",
":",
"raise",
"SpecificationError",
"(",
"str",
"(",
"e",
")",
")",
"except",
"Exception",
":",
"result",
"=",
"self",
".",
"aggregate",
"(",
"lambda",
"x",
":",
"npfunc",
"(",
"x",
",",
"axis",
"=",
"self",
".",
"axis",
")",
")",
"if",
"_convert",
":",
"result",
"=",
"result",
".",
"_convert",
"(",
"datetime",
"=",
"True",
")",
"return",
"result",
"set_function_name",
"(",
"f",
",",
"name",
",",
"cls",
")",
"return",
"f",
"def",
"first_compat",
"(",
"x",
",",
"axis",
"=",
"0",
")",
":",
"def",
"first",
"(",
"x",
")",
":",
"x",
"=",
"x",
".",
"to_numpy",
"(",
")",
"x",
"=",
"x",
"[",
"notna",
"(",
"x",
")",
"]",
"if",
"len",
"(",
"x",
")",
"==",
"0",
":",
"return",
"np",
".",
"nan",
"return",
"x",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"x",
",",
"DataFrame",
")",
":",
"return",
"x",
".",
"apply",
"(",
"first",
",",
"axis",
"=",
"axis",
")",
"else",
":",
"return",
"first",
"(",
"x",
")",
"def",
"last_compat",
"(",
"x",
",",
"axis",
"=",
"0",
")",
":",
"def",
"last",
"(",
"x",
")",
":",
"x",
"=",
"x",
".",
"to_numpy",
"(",
")",
"x",
"=",
"x",
"[",
"notna",
"(",
"x",
")",
"]",
"if",
"len",
"(",
"x",
")",
"==",
"0",
":",
"return",
"np",
".",
"nan",
"return",
"x",
"[",
"-",
"1",
"]",
"if",
"isinstance",
"(",
"x",
",",
"DataFrame",
")",
":",
"return",
"x",
".",
"apply",
"(",
"last",
",",
"axis",
"=",
"axis",
")",
"else",
":",
"return",
"last",
"(",
"x",
")",
"cls",
".",
"sum",
"=",
"groupby_function",
"(",
"'sum'",
",",
"'add'",
",",
"np",
".",
"sum",
",",
"min_count",
"=",
"0",
")",
"cls",
".",
"prod",
"=",
"groupby_function",
"(",
"'prod'",
",",
"'prod'",
",",
"np",
".",
"prod",
",",
"min_count",
"=",
"0",
")",
"cls",
".",
"min",
"=",
"groupby_function",
"(",
"'min'",
",",
"'min'",
",",
"np",
".",
"min",
",",
"numeric_only",
"=",
"False",
")",
"cls",
".",
"max",
"=",
"groupby_function",
"(",
"'max'",
",",
"'max'",
",",
"np",
".",
"max",
",",
"numeric_only",
"=",
"False",
")",
"cls",
".",
"first",
"=",
"groupby_function",
"(",
"'first'",
",",
"'first'",
",",
"first_compat",
",",
"numeric_only",
"=",
"False",
")",
"cls",
".",
"last",
"=",
"groupby_function",
"(",
"'last'",
",",
"'last'",
",",
"last_compat",
",",
"numeric_only",
"=",
"False",
")"
] | 34 | 17.324324 |
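The first/last semantics the compat helpers implement (skip NaN, take the first or last remaining value) can be seen with plain pandas calls; this assumes pandas and numpy are installed:

import numpy as np
import pandas as pd

s = pd.Series([np.nan, 10.0, 20.0])
g = s.groupby([0, 0, 0])
print(g.first())  # 10.0 -- first non-NA value in the group
print(g.last())   # 20.0 -- last non-NA value in the group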
def add_pk_if_required(db, table, name):
"""Return a class deriving from our Model class as well as the SQLAlchemy
model.
:param `sqlalchemy.schema.Table` table: table to create a primary key for
"""
db.metadata.reflect(bind=db.engine)
cls_dict = {'__tablename__': name}
if not table.primary_key:
for column in table.columns:
column.primary_key = True
Table(name, db.metadata, *table.columns, extend_existing=True)
cls_dict['__table__'] = table
db.metadata.create_all(bind=db.engine)
return type(str(name), (sandman_model, db.Model), cls_dict)
|
[
"def",
"add_pk_if_required",
"(",
"db",
",",
"table",
",",
"name",
")",
":",
"db",
".",
"metadata",
".",
"reflect",
"(",
"bind",
"=",
"db",
".",
"engine",
")",
"cls_dict",
"=",
"{",
"'__tablename__'",
":",
"name",
"}",
"if",
"not",
"table",
".",
"primary_key",
":",
"for",
"column",
"in",
"table",
".",
"columns",
":",
"column",
".",
"primary_key",
"=",
"True",
"Table",
"(",
"name",
",",
"db",
".",
"metadata",
",",
"*",
"table",
".",
"columns",
",",
"extend_existing",
"=",
"True",
")",
"cls_dict",
"[",
"'__table__'",
"]",
"=",
"table",
"db",
".",
"metadata",
".",
"create_all",
"(",
"bind",
"=",
"db",
".",
"engine",
")",
"return",
"type",
"(",
"str",
"(",
"name",
")",
",",
"(",
"sandman_model",
",",
"db",
".",
"Model",
")",
",",
"cls_dict",
")"
] | 36.333333 | 15.444444 |
def unionstore(self, dest, *others):
"""
Store the union of the current set and one or more
others in a new key.
:param dest: the name of the key to store union
:param others: One or more :py:class:`Set` instances
:returns: A :py:class:`Set` referencing ``dest``.
"""
keys = [self.key]
keys.extend([other.key for other in others])
self.database.sunionstore(dest, keys)
return self.database.Set(dest)
|
[
"def",
"unionstore",
"(",
"self",
",",
"dest",
",",
"*",
"others",
")",
":",
"keys",
"=",
"[",
"self",
".",
"key",
"]",
"keys",
".",
"extend",
"(",
"[",
"other",
".",
"key",
"for",
"other",
"in",
"others",
"]",
")",
"self",
".",
"database",
".",
"sunionstore",
"(",
"dest",
",",
"keys",
")",
"return",
"self",
".",
"database",
".",
"Set",
"(",
"dest",
")"
] | 36.615385 | 12.307692 |
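Underneath, this is Redis's SUNIONSTORE; a sketch with redis-py, assuming a local Redis server is running and the key names are hypothetical:

import redis

r = redis.Redis()
r.sadd('set:a', 1, 2)
r.sadd('set:b', 2, 3)
r.sunionstore('set:dest', ['set:a', 'set:b'])
print(r.smembers('set:dest'))  # union of both sets, stored under 'set:dest'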
def register_edited_channel_post_handler(self, callback, *custom_filters, commands=None, regexp=None,
content_types=None, state=None, run_task=None, **kwargs):
"""
Register handler for edited channel post
:param callback:
:param commands: list of commands
:param regexp: REGEXP
:param content_types: List of content types.
:param state:
:param custom_filters: list of custom filters
:param run_task: run callback in task (do not wait for results)
:param kwargs:
:return: decorated function
"""
filters_set = self.filters_factory.resolve(self.edited_message_handlers,
*custom_filters,
commands=commands,
regexp=regexp,
content_types=content_types,
state=state,
**kwargs)
self.edited_channel_post_handlers.register(self._wrap_async_task(callback, run_task), filters_set)
|
[
"def",
"register_edited_channel_post_handler",
"(",
"self",
",",
"callback",
",",
"*",
"custom_filters",
",",
"commands",
"=",
"None",
",",
"regexp",
"=",
"None",
",",
"content_types",
"=",
"None",
",",
"state",
"=",
"None",
",",
"run_task",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"filters_set",
"=",
"self",
".",
"filters_factory",
".",
"resolve",
"(",
"self",
".",
"edited_message_handlers",
",",
"*",
"custom_filters",
",",
"commands",
"=",
"commands",
",",
"regexp",
"=",
"regexp",
",",
"content_types",
"=",
"content_types",
",",
"state",
"=",
"state",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"edited_channel_post_handlers",
".",
"register",
"(",
"self",
".",
"_wrap_async_task",
"(",
"callback",
",",
"run_task",
")",
",",
"filters_set",
")"
] | 52.26087 | 24.26087 |
def getDataRowCount(self):
"""
:returns: (int) count of data rows in dataset (excluding header lines)
"""
numLines = self._getTotalLineCount()
if numLines == 0:
# this may be the case in a file opened for write before the
# header rows are written out
assert self._mode == self._FILE_WRITE_MODE and self._recordCount == 0
numDataRows = 0
else:
numDataRows = numLines - self._NUM_HEADER_ROWS
assert numDataRows >= 0
return numDataRows
|
[
"def",
"getDataRowCount",
"(",
"self",
")",
":",
"numLines",
"=",
"self",
".",
"_getTotalLineCount",
"(",
")",
"if",
"numLines",
"==",
"0",
":",
"# this may be the case in a file opened for write before the",
"# header rows are written out",
"assert",
"self",
".",
"_mode",
"==",
"self",
".",
"_FILE_WRITE_MODE",
"and",
"self",
".",
"_recordCount",
"==",
"0",
"numDataRows",
"=",
"0",
"else",
":",
"numDataRows",
"=",
"numLines",
"-",
"self",
".",
"_NUM_HEADER_ROWS",
"assert",
"numDataRows",
">=",
"0",
"return",
"numDataRows"
] | 28.352941 | 20.352941 |
def cli_encrypt(context, key):
"""
Encrypts context.io_manager's stdin and sends that to
context.io_manager's stdout.
This can be useful to encrypt to disk before attempting to
upload, allowing upload retries and segmented encrypted objects.
See :py:mod:`swiftly.cli.encrypt` for context usage information.
See :py:class:`CLIEncrypt` for more information.
"""
with context.io_manager.with_stdout() as stdout:
with context.io_manager.with_stdin() as stdin:
for chunk in aes_encrypt(key, stdin, preamble=AES256CBC):
stdout.write(chunk)
stdout.flush()
|
[
"def",
"cli_encrypt",
"(",
"context",
",",
"key",
")",
":",
"with",
"context",
".",
"io_manager",
".",
"with_stdout",
"(",
")",
"as",
"stdout",
":",
"with",
"context",
".",
"io_manager",
".",
"with_stdin",
"(",
")",
"as",
"stdin",
":",
"for",
"chunk",
"in",
"aes_encrypt",
"(",
"key",
",",
"stdin",
",",
"preamble",
"=",
"AES256CBC",
")",
":",
"stdout",
".",
"write",
"(",
"chunk",
")",
"stdout",
".",
"flush",
"(",
")"
] | 36.470588 | 18.823529 |
def _maybe_deserialize_body(self):
"""Attempt to deserialize the message body based upon the content-type.
:rtype: mixed
"""
if not self.content_type:
return self._message_body
ct = headers.parse_content_type(self.content_type)
key = '{}/{}'.format(ct.content_type, ct.content_subtype)
if key not in self._SERIALIZATION_MAP:
if key not in self._IGNORE_TYPES:
self.logger.debug('Unsupported content-type: %s',
self.content_type)
return self._message_body
elif not self._SERIALIZATION_MAP[key].get('enabled', True):
self.logger.debug('%s is not enabled in the serialization map',
key)
return self._message_body
value = self._message_body
if not self._SERIALIZATION_MAP[key].get('binary'):
value = self._maybe_decode(
self._message_body, ct.parameters.get('charset', 'utf-8'))
return self._maybe_invoke_serialization(value, 'load', key)
|
[
"def",
"_maybe_deserialize_body",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"content_type",
":",
"return",
"self",
".",
"_message_body",
"ct",
"=",
"headers",
".",
"parse_content_type",
"(",
"self",
".",
"content_type",
")",
"key",
"=",
"'{}/{}'",
".",
"format",
"(",
"ct",
".",
"content_type",
",",
"ct",
".",
"content_subtype",
")",
"if",
"key",
"not",
"in",
"self",
".",
"_SERIALIZATION_MAP",
":",
"if",
"key",
"not",
"in",
"self",
".",
"_IGNORE_TYPES",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Unsupported content-type: %s'",
",",
"self",
".",
"content_type",
")",
"return",
"self",
".",
"_message_body",
"elif",
"not",
"self",
".",
"_SERIALIZATION_MAP",
"[",
"key",
"]",
".",
"get",
"(",
"'enabled'",
",",
"True",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'%s is not enabled in the serialization map'",
",",
"key",
")",
"return",
"self",
".",
"_message_body",
"value",
"=",
"self",
".",
"_message_body",
"if",
"not",
"self",
".",
"_SERIALIZATION_MAP",
"[",
"key",
"]",
".",
"get",
"(",
"'binary'",
")",
":",
"value",
"=",
"self",
".",
"_maybe_decode",
"(",
"self",
".",
"_message_body",
",",
"ct",
".",
"parameters",
".",
"get",
"(",
"'charset'",
",",
"'utf-8'",
")",
")",
"return",
"self",
".",
"_maybe_invoke_serialization",
"(",
"value",
",",
"'load'",
",",
"key",
")"
] | 44.5 | 15.25 |
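A minimal sketch of the content-type keyed dispatch this method performs; the map layout below is a simplified assumption, not the library's actual structure:

import json

SERIALIZATION_MAP = {
    'application/json': {'load': json.loads, 'enabled': True},
}

def deserialize(body, content_type):
    entry = SERIALIZATION_MAP.get(content_type)
    if not entry or not entry.get('enabled', True):
        return body                         # unsupported: return raw body
    return entry['load'](body.decode('utf-8'))

print(deserialize(b'{"a": 1}', 'application/json'))  # {'a': 1}
print(deserialize(b'raw bytes', 'text/plain'))       # b'raw bytes'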
def all(self, store_id, product_id, get_all=False, **queryparams):
"""
Get information about a product’s images.
:param store_id: The store id.
:type store_id: :py:class:`str`
:param product_id: The id for the product of a store.
:type product_id: :py:class:`str`
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
"""
self.store_id = store_id
self.product_id = product_id
self.image_id = None
if get_all:
return self._iterate(url=self._build_path(store_id, 'products', product_id, 'images'), **queryparams)
else:
return self._mc_client._post(url=self._build_path(store_id, 'products', product_id, 'images'), **queryparams)
|
[
"def",
"all",
"(",
"self",
",",
"store_id",
",",
"product_id",
",",
"get_all",
"=",
"False",
",",
"*",
"*",
"queryparams",
")",
":",
"self",
".",
"store_id",
"=",
"store_id",
"self",
".",
"product_id",
"=",
"product_id",
"self",
".",
"image_id",
"=",
"None",
"if",
"get_all",
":",
"return",
"self",
".",
"_iterate",
"(",
"url",
"=",
"self",
".",
"_build_path",
"(",
"store_id",
",",
"'products'",
",",
"product_id",
",",
"'images'",
")",
",",
"*",
"*",
"queryparams",
")",
"else",
":",
"return",
"self",
".",
"_mc_client",
".",
"_post",
"(",
"url",
"=",
"self",
".",
"_build_path",
"(",
"store_id",
",",
"'products'",
",",
"product_id",
",",
"'images'",
")",
",",
"*",
"*",
"queryparams",
")"
] | 42.652174 | 16.043478 |
def GetScriptHashesForVerifying(self):
"""
Get a list of script hashes for verifying transactions.
Raises:
Exception: if there are no valid transactions to claim from.
Returns:
list: of UInt160 type script hashes.
"""
hashes = super(ClaimTransaction, self).GetScriptHashesForVerifying()
for hash, group in groupby(self.Claims, lambda x: x.PrevHash):
tx, height = Blockchain.Default().GetTransaction(hash)
if tx is None:
raise Exception("Invalid Claim Operation")
for claim in group:
if len(tx.outputs) <= claim.PrevIndex:
raise Exception("Invalid Claim Operation")
script_hash = tx.outputs[claim.PrevIndex].ScriptHash
if script_hash not in hashes:
hashes.append(script_hash)
hashes.sort()
return hashes
|
[
"def",
"GetScriptHashesForVerifying",
"(",
"self",
")",
":",
"hashes",
"=",
"super",
"(",
"ClaimTransaction",
",",
"self",
")",
".",
"GetScriptHashesForVerifying",
"(",
")",
"for",
"hash",
",",
"group",
"in",
"groupby",
"(",
"self",
".",
"Claims",
",",
"lambda",
"x",
":",
"x",
".",
"PrevHash",
")",
":",
"tx",
",",
"height",
"=",
"Blockchain",
".",
"Default",
"(",
")",
".",
"GetTransaction",
"(",
"hash",
")",
"if",
"tx",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Invalid Claim Operation\"",
")",
"for",
"claim",
"in",
"group",
":",
"if",
"len",
"(",
"tx",
".",
"outputs",
")",
"<=",
"claim",
".",
"PrevIndex",
":",
"raise",
"Exception",
"(",
"\"Invalid Claim Operation\"",
")",
"script_hash",
"=",
"tx",
".",
"outputs",
"[",
"claim",
".",
"PrevIndex",
"]",
".",
"ScriptHash",
"if",
"script_hash",
"not",
"in",
"hashes",
":",
"hashes",
".",
"append",
"(",
"script_hash",
")",
"hashes",
".",
"sort",
"(",
")",
"return",
"hashes"
] | 30.6 | 24 |
def find_tag(match: str, strict: bool, directory: str):
"""Find tag for git repository."""
with suppress(CalledProcessError):
echo(git.find_tag(match, strict=strict, git_dir=directory))
|
[
"def",
"find_tag",
"(",
"match",
":",
"str",
",",
"strict",
":",
"bool",
",",
"directory",
":",
"str",
")",
":",
"with",
"suppress",
"(",
"CalledProcessError",
")",
":",
"echo",
"(",
"git",
".",
"find_tag",
"(",
"match",
",",
"strict",
"=",
"strict",
",",
"git_dir",
"=",
"directory",
")",
")"
] | 49.5 | 11 |
def _num_plurals(self, catalogue):
"""
Return the number of plurals for this catalog language, or 2 if no
plural string is available.
"""
match = re.search(r'nplurals=\s*(\d+)', self.get_plural(catalogue) or '')
if match:
return int(match.groups()[0])
return 2
|
[
"def",
"_num_plurals",
"(",
"self",
",",
"catalogue",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'nplurals=\\s*(\\d+)'",
",",
"self",
".",
"get_plural",
"(",
"catalogue",
")",
"or",
"''",
")",
"if",
"match",
":",
"return",
"int",
"(",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"return",
"2"
] | 35.555556 | 14.888889 |
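The regex at work on a typical gettext Plural-Forms header (self-contained):

import re

plural_forms = 'nplurals=2; plural=(n != 1);'
match = re.search(r'nplurals=\s*(\d+)', plural_forms)
print(int(match.groups()[0]) if match else 2)  # -> 2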
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
pysolvers.minisatgh_pbudget(self.minisat, budget)
|
[
"def",
"prop_budget",
"(",
"self",
",",
"budget",
")",
":",
"if",
"self",
".",
"minisat",
":",
"pysolvers",
".",
"minisatgh_pbudget",
"(",
"self",
".",
"minisat",
",",
"budget",
")"
] | 27 | 14.142857 |
def show_stacking(self):
"""Visualizes pi-stacking interactions."""
grp = self.getPseudoBondGroup("pi-Stacking-%i" % self.tid, associateWith=[self.model])
grp.lineWidth = 3
grp.lineType = self.chimera.Dash
for i, stack in enumerate(self.plcomplex.pistacking):
m = self.model
r = m.newResidue("pseudoatoms", " ", 1, " ")
centroid_prot = m.newAtom("CENTROID", self.chimera.Element("CENTROID"))
x, y, z = stack.proteinring_center
centroid_prot.setCoord(self.chimera.Coord(x, y, z))
r.addAtom(centroid_prot)
centroid_lig = m.newAtom("CENTROID", self.chimera.Element("CENTROID"))
x, y, z = stack.ligandring_center
centroid_lig.setCoord(self.chimera.Coord(x, y, z))
r.addAtom(centroid_lig)
b = grp.newPseudoBond(centroid_lig, centroid_prot)
b.color = self.colorbyname('forest green')
self.bs_res_ids += stack.proteinring_atoms
|
[
"def",
"show_stacking",
"(",
"self",
")",
":",
"grp",
"=",
"self",
".",
"getPseudoBondGroup",
"(",
"\"pi-Stacking-%i\"",
"%",
"self",
".",
"tid",
",",
"associateWith",
"=",
"[",
"self",
".",
"model",
"]",
")",
"grp",
".",
"lineWidth",
"=",
"3",
"grp",
".",
"lineType",
"=",
"self",
".",
"chimera",
".",
"Dash",
"for",
"i",
",",
"stack",
"in",
"enumerate",
"(",
"self",
".",
"plcomplex",
".",
"pistacking",
")",
":",
"m",
"=",
"self",
".",
"model",
"r",
"=",
"m",
".",
"newResidue",
"(",
"\"pseudoatoms\"",
",",
"\" \"",
",",
"1",
",",
"\" \"",
")",
"centroid_prot",
"=",
"m",
".",
"newAtom",
"(",
"\"CENTROID\"",
",",
"self",
".",
"chimera",
".",
"Element",
"(",
"\"CENTROID\"",
")",
")",
"x",
",",
"y",
",",
"z",
"=",
"stack",
".",
"proteinring_center",
"centroid_prot",
".",
"setCoord",
"(",
"self",
".",
"chimera",
".",
"Coord",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
"r",
".",
"addAtom",
"(",
"centroid_prot",
")",
"centroid_lig",
"=",
"m",
".",
"newAtom",
"(",
"\"CENTROID\"",
",",
"self",
".",
"chimera",
".",
"Element",
"(",
"\"CENTROID\"",
")",
")",
"x",
",",
"y",
",",
"z",
"=",
"stack",
".",
"ligandring_center",
"centroid_lig",
".",
"setCoord",
"(",
"self",
".",
"chimera",
".",
"Coord",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
"r",
".",
"addAtom",
"(",
"centroid_lig",
")",
"b",
"=",
"grp",
".",
"newPseudoBond",
"(",
"centroid_lig",
",",
"centroid_prot",
")",
"b",
".",
"color",
"=",
"self",
".",
"colorbyname",
"(",
"'forest green'",
")",
"self",
".",
"bs_res_ids",
"+=",
"stack",
".",
"proteinring_atoms"
] | 43.391304 | 21.565217 |
def post_create_table(self, table):
"""Build table-level CREATE options."""
table_opts = []
if 'impala_partition_by' in table.kwargs:
table_opts.append('PARTITION BY %s' % table.kwargs.get('impala_partition_by'))
if 'impala_stored_as' in table.kwargs:
table_opts.append('STORED AS %s' % table.kwargs.get('impala_stored_as'))
if 'impala_table_properties' in table.kwargs:
table_properties = ["'{0}' = '{1}'".format(property_, value)
for property_, value
in table.kwargs.get('impala_table_properties', {}).items()]
table_opts.append('TBLPROPERTIES (%s)' % ', '.join(table_properties))
return '\n%s' % '\n'.join(table_opts)
|
[
"def",
"post_create_table",
"(",
"self",
",",
"table",
")",
":",
"table_opts",
"=",
"[",
"]",
"if",
"'impala_partition_by'",
"in",
"table",
".",
"kwargs",
":",
"table_opts",
".",
"append",
"(",
"'PARTITION BY %s'",
"%",
"table",
".",
"kwargs",
".",
"get",
"(",
"'impala_partition_by'",
")",
")",
"if",
"'impala_stored_as'",
"in",
"table",
".",
"kwargs",
":",
"table_opts",
".",
"append",
"(",
"'STORED AS %s'",
"%",
"table",
".",
"kwargs",
".",
"get",
"(",
"'impala_stored_as'",
")",
")",
"if",
"'impala_table_properties'",
"in",
"table",
".",
"kwargs",
":",
"table_properties",
"=",
"[",
"\"'{0}' = '{1}'\"",
".",
"format",
"(",
"property_",
",",
"value",
")",
"for",
"property_",
",",
"value",
"in",
"table",
".",
"kwargs",
".",
"get",
"(",
"'impala_table_properties'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
"]",
"table_opts",
".",
"append",
"(",
"'TBLPROPERTIES (%s)'",
"%",
"', '",
".",
"join",
"(",
"table_properties",
")",
")",
"return",
"'\\n%s'",
"%",
"'\\n'",
".",
"join",
"(",
"table_opts",
")"
] | 45.176471 | 26.176471 |
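What the emitted suffix looks like for some hypothetical Impala kwargs, with the string assembly pulled out of the method:

kwargs = {'impala_stored_as': 'PARQUET',
          'impala_table_properties': {'transactional': 'false'}}
table_opts = ['STORED AS %s' % kwargs['impala_stored_as']]
props = ["'{0}' = '{1}'".format(k, v)
         for k, v in kwargs['impala_table_properties'].items()]
table_opts.append('TBLPROPERTIES (%s)' % ', '.join(props))
print('\n%s' % '\n'.join(table_opts))
# STORED AS PARQUET
# TBLPROPERTIES ('transactional' = 'false')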
def handle_omp_for(self, node, local_iter):
"""
Fix OpenMP directives on For loops.
Add the target as a private variable, as a new variable may have been
introduced to handle the cxx iterator.
Also, add the iterator as a shared variable, as all 'parallel for' chunks
have to use the same iterator.
"""
for directive in metadata.get(node, OMPDirective):
if any(key in directive.s for key in (' parallel ', ' task ')):
# Eventually add local_iter in a shared clause as iterable is
# shared in the for loop (for every clause with datasharing)
directive.s += ' shared({})'
directive.deps.append(ast.Name(local_iter, ast.Load(), None))
directive.shared_deps.append(directive.deps[-1])
target = node.target
assert isinstance(target, ast.Name)
hasfor = 'for' in directive.s
nodefault = 'default' not in directive.s
noindexref = all(isinstance(x, ast.Name) and
x.id != target.id for x in directive.deps)
if (hasfor and nodefault and noindexref and
target.id not in self.scope[node]):
# Target is private by default in omp but iterator use may
# introduce an extra variable
directive.s += ' private({})'
directive.deps.append(ast.Name(target.id, ast.Load(), None))
directive.private_deps.append(directive.deps[-1])
|
[
"def",
"handle_omp_for",
"(",
"self",
",",
"node",
",",
"local_iter",
")",
":",
"for",
"directive",
"in",
"metadata",
".",
"get",
"(",
"node",
",",
"OMPDirective",
")",
":",
"if",
"any",
"(",
"key",
"in",
"directive",
".",
"s",
"for",
"key",
"in",
"(",
"' parallel '",
",",
"' task '",
")",
")",
":",
"# Eventually add local_iter in a shared clause as iterable is",
"# shared in the for loop (for every clause with datasharing)",
"directive",
".",
"s",
"+=",
"' shared({})'",
"directive",
".",
"deps",
".",
"append",
"(",
"ast",
".",
"Name",
"(",
"local_iter",
",",
"ast",
".",
"Load",
"(",
")",
",",
"None",
")",
")",
"directive",
".",
"shared_deps",
".",
"append",
"(",
"directive",
".",
"deps",
"[",
"-",
"1",
"]",
")",
"target",
"=",
"node",
".",
"target",
"assert",
"isinstance",
"(",
"target",
",",
"ast",
".",
"Name",
")",
"hasfor",
"=",
"'for'",
"in",
"directive",
".",
"s",
"nodefault",
"=",
"'default'",
"not",
"in",
"directive",
".",
"s",
"noindexref",
"=",
"all",
"(",
"isinstance",
"(",
"x",
",",
"ast",
".",
"Name",
")",
"and",
"x",
".",
"id",
"!=",
"target",
".",
"id",
"for",
"x",
"in",
"directive",
".",
"deps",
")",
"if",
"(",
"hasfor",
"and",
"nodefault",
"and",
"noindexref",
"and",
"target",
".",
"id",
"not",
"in",
"self",
".",
"scope",
"[",
"node",
"]",
")",
":",
"# Target is private by default in omp but iterator use may",
"# introduce an extra variable",
"directive",
".",
"s",
"+=",
"' private({})'",
"directive",
".",
"deps",
".",
"append",
"(",
"ast",
".",
"Name",
"(",
"target",
".",
"id",
",",
"ast",
".",
"Load",
"(",
")",
",",
"None",
")",
")",
"directive",
".",
"private_deps",
".",
"append",
"(",
"directive",
".",
"deps",
"[",
"-",
"1",
"]",
")"
] | 49.16129 | 19.419355 |
def clear_socket(self):
'''
delete socket if you have it
'''
if hasattr(self, '_socket'):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
log.trace('Unregistering socket: %s', socket)
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
log.trace('Unregistering socket: %s', socket)
self.poller.unregister(socket[0])
del self._socket
|
[
"def",
"clear_socket",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_socket'",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"poller",
".",
"sockets",
",",
"dict",
")",
":",
"sockets",
"=",
"list",
"(",
"self",
".",
"poller",
".",
"sockets",
".",
"keys",
"(",
")",
")",
"for",
"socket",
"in",
"sockets",
":",
"log",
".",
"trace",
"(",
"'Unregistering socket: %s'",
",",
"socket",
")",
"self",
".",
"poller",
".",
"unregister",
"(",
"socket",
")",
"else",
":",
"for",
"socket",
"in",
"self",
".",
"poller",
".",
"sockets",
":",
"log",
".",
"trace",
"(",
"'Unregistering socket: %s'",
",",
"socket",
")",
"self",
".",
"poller",
".",
"unregister",
"(",
"socket",
"[",
"0",
"]",
")",
"del",
"self",
".",
"_socket"
] | 39.6 | 15.6 |
def get_catalogs(portal):
"""Returns the catalogs from the site
"""
res = []
for object in portal.objectValues():
if ICatalogTool.providedBy(object):
res.append(object)
elif IZCatalog.providedBy(object):
res.append(object)
res.sort()
return res
|
[
"def",
"get_catalogs",
"(",
"portal",
")",
":",
"res",
"=",
"[",
"]",
"for",
"object",
"in",
"portal",
".",
"objectValues",
"(",
")",
":",
"if",
"ICatalogTool",
".",
"providedBy",
"(",
"object",
")",
":",
"res",
".",
"append",
"(",
"object",
")",
"elif",
"IZCatalog",
".",
"providedBy",
"(",
"object",
")",
":",
"res",
".",
"append",
"(",
"object",
")",
"res",
".",
"sort",
"(",
")",
"return",
"res"
] | 27.090909 | 10.909091 |
def DOM_setFileInputFiles(self, files, **kwargs):
"""
Function path: DOM.setFileInputFiles
Domain: DOM
Method name: setFileInputFiles
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'files' (type: array) -> Array of file paths to set.
Optional arguments:
'nodeId' (type: NodeId) -> Identifier of the node.
'backendNodeId' (type: BackendNodeId) -> Identifier of the backend node.
'objectId' (type: Runtime.RemoteObjectId) -> JavaScript object id of the node wrapper.
No return value.
Description: Sets files for the given file input element.
"""
assert isinstance(files, (list, tuple)
), "Argument 'files' must be of type '['list', 'tuple']'. Received type: '%s'" % type(
files)
expected = ['nodeId', 'backendNodeId', 'objectId']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['nodeId', 'backendNodeId', 'objectId']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('DOM.setFileInputFiles', files=
files, **kwargs)
return subdom_funcs
|
[
"def",
"DOM_setFileInputFiles",
"(",
"self",
",",
"files",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"files",
",",
"(",
"list",
",",
"tuple",
")",
")",
",",
"\"Argument 'files' must be of type '['list', 'tuple']'. Received type: '%s'\"",
"%",
"type",
"(",
"files",
")",
"expected",
"=",
"[",
"'nodeId'",
",",
"'backendNodeId'",
",",
"'objectId'",
"]",
"passed_keys",
"=",
"list",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"assert",
"all",
"(",
"[",
"(",
"key",
"in",
"expected",
")",
"for",
"key",
"in",
"passed_keys",
"]",
")",
",",
"\"Allowed kwargs are ['nodeId', 'backendNodeId', 'objectId']. Passed kwargs: %s\"",
"%",
"passed_keys",
"subdom_funcs",
"=",
"self",
".",
"synchronous_command",
"(",
"'DOM.setFileInputFiles'",
",",
"files",
"=",
"files",
",",
"*",
"*",
"kwargs",
")",
"return",
"subdom_funcs"
] | 38.758621 | 21.862069 |
def rand_blend_mask(shape, rand=rand.uniform(-10, 10), **kwargs):
""" random blending masks """
# batch, channel = shape[0], shape[3]
z = rand(shape[0]) # seed
noise = snoise2dz((shape[1], shape[2]), z, **kwargs)
return noise
|
[
"def",
"rand_blend_mask",
"(",
"shape",
",",
"rand",
"=",
"rand",
".",
"uniform",
"(",
"-",
"10",
",",
"10",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"# batch, channel = shape[0], shape[3]",
"z",
"=",
"rand",
"(",
"shape",
"[",
"0",
"]",
")",
"# seed",
"noise",
"=",
"snoise2dz",
"(",
"(",
"shape",
"[",
"1",
"]",
",",
"shape",
"[",
"2",
"]",
")",
",",
"z",
",",
"*",
"*",
"kwargs",
")",
"return",
"noise"
] | 34.428571 | 16.571429 |
def load_django_settings(self):
"""
Loads Django settings for the current site and sets them so Django internals can be run.
"""
r = self.local_renderer
# Save environment variables so we can restore them later.
_env = {}
save_vars = ['ALLOW_CELERY', 'DJANGO_SETTINGS_MODULE']
for var_name in save_vars:
_env[var_name] = os.environ.get(var_name)
try:
# Allow us to import local app modules.
if r.env.local_project_dir:
sys.path.insert(0, r.env.local_project_dir)
#TODO:remove this once bug in django-celery has been fixed
os.environ['ALLOW_CELERY'] = '0'
# print('settings_module:', r.format(r.env.settings_module))
os.environ['DJANGO_SETTINGS_MODULE'] = r.format(r.env.settings_module)
# os.environ['CELERY_LOADER'] = 'django'
# os.environ['SITE'] = r.genv.SITE or r.genv.default_site
# os.environ['ROLE'] = r.genv.ROLE or r.genv.default_role
# In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet
# Disabling, in Django >= 1.10, throws exception:
# RuntimeError: Model class django.contrib.contenttypes.models.ContentType
# doesn't declare an explicit app_label and isn't in an application in INSTALLED_APPS.
# try:
# from django.core.wsgi import get_wsgi_application
# application = get_wsgi_application()
# except (ImportError, RuntimeError):
# raise
# print('Unable to get wsgi application.')
# traceback.print_exc()
# In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet
try:
import django
django.setup()
except AttributeError:
# This doesn't exist in Django < 1.7, so ignore it.
pass
# Load Django settings.
settings = self.get_settings()
try:
from django.contrib import staticfiles
from django.conf import settings as _settings
# get_settings() doesn't raise ImportError but returns None instead
if settings is not None:
for k, v in settings.__dict__.items():
setattr(_settings, k, v)
else:
raise ImportError
except (ImportError, RuntimeError):
print('Unable to load settings.')
traceback.print_exc()
finally:
# Restore environment variables.
for var_name, var_value in _env.items():
if var_value is None:
del os.environ[var_name]
else:
os.environ[var_name] = var_value
return settings
|
[
"def",
"load_django_settings",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"# Save environment variables so we can restore them later.",
"_env",
"=",
"{",
"}",
"save_vars",
"=",
"[",
"'ALLOW_CELERY'",
",",
"'DJANGO_SETTINGS_MODULE'",
"]",
"for",
"var_name",
"in",
"save_vars",
":",
"_env",
"[",
"var_name",
"]",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"var_name",
")",
"try",
":",
"# Allow us to import local app modules.",
"if",
"r",
".",
"env",
".",
"local_project_dir",
":",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"r",
".",
"env",
".",
"local_project_dir",
")",
"#TODO:remove this once bug in django-celery has been fixed",
"os",
".",
"environ",
"[",
"'ALLOW_CELERY'",
"]",
"=",
"'0'",
"# print('settings_module:', r.format(r.env.settings_module))",
"os",
".",
"environ",
"[",
"'DJANGO_SETTINGS_MODULE'",
"]",
"=",
"r",
".",
"format",
"(",
"r",
".",
"env",
".",
"settings_module",
")",
"# os.environ['CELERY_LOADER'] = 'django'",
"# os.environ['SITE'] = r.genv.SITE or r.genv.default_site",
"# os.environ['ROLE'] = r.genv.ROLE or r.genv.default_role",
"# In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet",
"# Disabling, in Django >= 1.10, throws exception:",
"# RuntimeError: Model class django.contrib.contenttypes.models.ContentType",
"# doesn't declare an explicit app_label and isn't in an application in INSTALLED_APPS.",
"# try:",
"# from django.core.wsgi import get_wsgi_application",
"# application = get_wsgi_application()",
"# except (ImportError, RuntimeError):",
"# raise",
"# print('Unable to get wsgi application.')",
"# traceback.print_exc()",
"# In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet",
"try",
":",
"import",
"django",
"django",
".",
"setup",
"(",
")",
"except",
"AttributeError",
":",
"# This doesn't exist in Django < 1.7, so ignore it.",
"pass",
"# Load Django settings.",
"settings",
"=",
"self",
".",
"get_settings",
"(",
")",
"try",
":",
"from",
"django",
".",
"contrib",
"import",
"staticfiles",
"from",
"django",
".",
"conf",
"import",
"settings",
"as",
"_settings",
"# get_settings() doesn't raise ImportError but returns None instead",
"if",
"settings",
"is",
"not",
"None",
":",
"for",
"k",
",",
"v",
"in",
"settings",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"_settings",
",",
"k",
",",
"v",
")",
"else",
":",
"raise",
"ImportError",
"except",
"(",
"ImportError",
",",
"RuntimeError",
")",
":",
"print",
"(",
"'Unable to load settings.'",
")",
"traceback",
".",
"print_exc",
"(",
")",
"finally",
":",
"# Restore environment variables.",
"for",
"var_name",
",",
"var_value",
"in",
"_env",
".",
"items",
"(",
")",
":",
"if",
"var_value",
"is",
"None",
":",
"del",
"os",
".",
"environ",
"[",
"var_name",
"]",
"else",
":",
"os",
".",
"environ",
"[",
"var_name",
"]",
"=",
"var_value",
"return",
"settings"
] | 39.986111 | 21.458333 |
def tostring(self, inject):
"""
Convert an element to a single string and allow the passed inject method to place content before any
element.
"""
injected_parts = ''
for part in self.parts:
injected = part.tostring(inject)
tei_tag = next(
(attribute for attribute in part.attributes if attribute.key == "tei-tag"), None)
if tei_tag and tei_tag.text == "w" and injected_parts:
# make sure words can be tokenized correctly
if injected_parts and injected_parts[-1] != ' ':
injected_parts += ' '
injected_parts += injected.strip() + ' '
else:
injected_parts += injected
return inject(self, injected_parts)
|
[
"def",
"tostring",
"(",
"self",
",",
"inject",
")",
":",
"injected_parts",
"=",
"''",
"for",
"part",
"in",
"self",
".",
"parts",
":",
"injected",
"=",
"part",
".",
"tostring",
"(",
"inject",
")",
"tei_tag",
"=",
"next",
"(",
"(",
"attribute",
"for",
"attribute",
"in",
"part",
".",
"attributes",
"if",
"attribute",
".",
"key",
"==",
"\"tei-tag\"",
")",
",",
"None",
")",
"if",
"tei_tag",
"and",
"tei_tag",
".",
"text",
"==",
"\"w\"",
"and",
"injected_parts",
":",
"# make sure words can be tokenized correctly",
"if",
"injected_parts",
"and",
"injected_parts",
"[",
"-",
"1",
"]",
"!=",
"' '",
":",
"injected_parts",
"+=",
"' '",
"injected_parts",
"+=",
"injected",
".",
"strip",
"(",
")",
"+",
"' '",
"else",
":",
"injected_parts",
"+=",
"injected",
"return",
"inject",
"(",
"self",
",",
"injected_parts",
")"
] | 41.473684 | 18.736842 |
def numpyAlignXY(data):
"""
Given a numpy array (XYXYXY columns), return it aligned.
Data returned will be XYYY. NaNs may be returned.
"""
print(data)
Xs=data.flatten()[::2] # get all X values
Xs=Xs[~np.isnan(Xs)] # remove nans
Xs=sorted(list(set(Xs))) # eliminate duplicates then sort it
aligned=np.empty((len(Xs),int(len(data[0])/2+1)))*np.nan
aligned[:,0]=Xs
for col in range(0,len(data[0]),2):
for row in range(len(data)):
X=data[row,col]
Y=data[row,col+1]
if np.isnan(X) or np.isnan(Y):
continue
aligned[Xs.index(X),int(col/2+1)]=Y
return aligned
|
[
"def",
"numpyAlignXY",
"(",
"data",
")",
":",
"print",
"(",
"data",
")",
"Xs",
"=",
"data",
".",
"flatten",
"(",
")",
"[",
":",
":",
"2",
"]",
"# get all X values",
"Xs",
"=",
"Xs",
"[",
"~",
"np",
".",
"isnan",
"(",
"Xs",
")",
"]",
"# remove nans",
"Xs",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"Xs",
")",
")",
")",
"# eliminate duplicates then sort it",
"aligned",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"Xs",
")",
",",
"int",
"(",
"len",
"(",
"data",
"[",
"0",
"]",
")",
"/",
"2",
"+",
"1",
")",
")",
")",
"*",
"np",
".",
"nan",
"aligned",
"[",
":",
",",
"0",
"]",
"=",
"Xs",
"for",
"col",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"data",
"[",
"0",
"]",
")",
",",
"2",
")",
":",
"for",
"row",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"X",
"=",
"data",
"[",
"row",
",",
"col",
"]",
"Y",
"=",
"data",
"[",
"row",
",",
"col",
"+",
"1",
"]",
"if",
"np",
".",
"isnan",
"(",
"X",
")",
"or",
"np",
".",
"isnan",
"(",
"Y",
")",
":",
"continue",
"aligned",
"[",
"Xs",
".",
"index",
"(",
"X",
")",
",",
"int",
"(",
"col",
"/",
"2",
"+",
"1",
")",
"]",
"=",
"Y",
"return",
"aligned"
] | 34.368421 | 11.736842 |
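A worked example, assuming the numpyAlignXY function above is in scope and numpy is installed:

import numpy as np

data = np.array([[1.0, 10.0, 2.0, 20.0],
                 [2.0, 11.0, 3.0, 21.0]])
# X values across both XY pairs are {1, 2, 3}; the result has one X column
# followed by one Y column per pair, with NaN where a pair lacks that X.
print(numpyAlignXY(data))
# [[ 1. 10. nan]
#  [ 2. 11. 20.]
#  [ 3. nan 21.]]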
def get_dependants(cls, dist):
"""Yield dependant user packages for a given package name."""
for package in cls.installed_distributions:
for requirement_package in package.requires():
requirement_name = requirement_package.project_name
# perform case-insensitive matching
if requirement_name.lower() == dist.lower():
yield package
|
[
"def",
"get_dependants",
"(",
"cls",
",",
"dist",
")",
":",
"for",
"package",
"in",
"cls",
".",
"installed_distributions",
":",
"for",
"requirement_package",
"in",
"package",
".",
"requires",
"(",
")",
":",
"requirement_name",
"=",
"requirement_package",
".",
"project_name",
"# perform case-insensitive matching",
"if",
"requirement_name",
".",
"lower",
"(",
")",
"==",
"dist",
".",
"lower",
"(",
")",
":",
"yield",
"package"
] | 52.375 | 13 |
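A standalone variant of the same scan using pkg_resources directly; the output depends on what is installed in your environment:

import pkg_resources

def dependants(dist_name):
    # Yield names of installed packages that require dist_name.
    for package in pkg_resources.working_set:
        for req in package.requires():
            if req.project_name.lower() == dist_name.lower():
                yield package.project_name

print(list(dependants('requests')))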
def transform_symmop(self, symmop):
# type: (Union[SymmOp, MagSymmOp]) -> Union[SymmOp, MagSymmOp]
"""
Takes a symmetry operation and transforms it.
:param symmop: SymmOp or MagSymmOp
:return:
"""
W = symmop.rotation_matrix
w = symmop.translation_vector
Q = np.linalg.inv(self.P)
W_ = np.matmul(np.matmul(Q, W), self.P)
I = np.identity(3)
w_ = np.matmul(Q, (w + np.matmul(W - I, self.p)))
if isinstance(symmop, MagSymmOp):
return MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=W_, translation_vec=w_,
time_reversal=symmop.time_reversal, tol=symmop.tol)
elif isinstance(symmop, SymmOp):
return SymmOp.from_rotation_and_translation(
rotation_matrix=W_, translation_vec=w_, tol=symmop.tol)
|
[
"def",
"transform_symmop",
"(",
"self",
",",
"symmop",
")",
":",
"# type: (Union[SymmOp, MagSymmOp]) -> Union[SymmOp, MagSymmOp]",
"W",
"=",
"symmop",
".",
"rotation_matrix",
"w",
"=",
"symmop",
".",
"translation_vector",
"Q",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"self",
".",
"P",
")",
"W_",
"=",
"np",
".",
"matmul",
"(",
"np",
".",
"matmul",
"(",
"Q",
",",
"W",
")",
",",
"self",
".",
"P",
")",
"I",
"=",
"np",
".",
"identity",
"(",
"3",
")",
"w_",
"=",
"np",
".",
"matmul",
"(",
"Q",
",",
"(",
"w",
"+",
"np",
".",
"matmul",
"(",
"W",
"-",
"I",
",",
"self",
".",
"p",
")",
")",
")",
"if",
"isinstance",
"(",
"symmop",
",",
"MagSymmOp",
")",
":",
"return",
"MagSymmOp",
".",
"from_rotation_and_translation_and_time_reversal",
"(",
"rotation_matrix",
"=",
"W_",
",",
"translation_vec",
"=",
"w_",
",",
"time_reversal",
"=",
"symmop",
".",
"time_reversal",
",",
"tol",
"=",
"symmop",
".",
"tol",
")",
"elif",
"isinstance",
"(",
"symmop",
",",
"SymmOp",
")",
":",
"return",
"SymmOp",
".",
"from_rotation_and_translation",
"(",
"rotation_matrix",
"=",
"W_",
",",
"translation_vec",
"=",
"w_",
",",
"tol",
"=",
"symmop",
".",
"tol",
")"
] | 44 | 12.7 |
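The change-of-basis arithmetic in isolation, numpy only; with the identity transformation (P, p), a symmetry operation comes back unchanged:

import numpy as np

P = np.identity(3)
p = np.zeros(3)
W = np.diag([1.0, -1.0, 1.0])          # a mirror as the rotation part
w = np.array([0.0, 0.5, 0.0])          # translation part
Q = np.linalg.inv(P)
W_ = Q @ W @ P
w_ = Q @ (w + (W - np.identity(3)) @ p)
print(W_)  # == W for the identity setting
print(w_)  # == w for the identity setting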
def pad(idnad: Tuple[List[float], List[float]], pad_nadir=0.05, pad_ideal=0.0):
"""
Pad an ideal/nadir estimate. This is mainly useful for padding the nadir
estimated by a payoff table for safety purposes.
"""
ideal, nadir = idnad
ideal_arr = np.array(ideal)
nadir_arr = np.array(nadir)
idnad_range = nadir_arr - ideal_arr
nadir_arr += pad_nadir * idnad_range
ideal_arr -= pad_ideal * idnad_range
return list(ideal_arr), list(nadir_arr)
|
[
"def",
"pad",
"(",
"idnad",
":",
"Tuple",
"[",
"List",
"[",
"float",
"]",
",",
"List",
"[",
"float",
"]",
"]",
",",
"pad_nadir",
"=",
"0.05",
",",
"pad_ideal",
"=",
"0.0",
")",
":",
"ideal",
",",
"nadir",
"=",
"idnad",
"ideal_arr",
"=",
"np",
".",
"array",
"(",
"ideal",
")",
"nadir_arr",
"=",
"np",
".",
"array",
"(",
"nadir",
")",
"idnad_range",
"=",
"nadir_arr",
"-",
"ideal_arr",
"nadir_arr",
"+=",
"pad_nadir",
"*",
"idnad_range",
"ideal_arr",
"-=",
"pad_ideal",
"*",
"idnad_range",
"return",
"list",
"(",
"ideal_arr",
")",
",",
"list",
"(",
"nadir_arr",
")"
] | 39.083333 | 10.416667 |
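A worked example, assuming the pad function above is in scope and numpy is installed:

ideal, nadir = [0.0, 0.0], [1.0, 2.0]
print(pad((ideal, nadir)))
# -> ([0.0, 0.0], [1.05, 2.1]) : the nadir moves out by 5% of the range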
def status_for_all_orders_in_a_stock(self, stock):
"""Status for all orders in a stock
https://starfighter.readme.io/docs/status-for-all-orders-in-a-stock
"""
url_fragment = 'venues/{venue}/accounts/{account}/stocks/{stock}/orders'.format(
stock=stock,
venue=self.venue,
account=self.account,
)
url = urljoin(self.base_url, url_fragment)
return self.session.get(url).json()
|
[
"def",
"status_for_all_orders_in_a_stock",
"(",
"self",
",",
"stock",
")",
":",
"url_fragment",
"=",
"'venues/{venue}/accounts/{account}/stocks/{stock}/orders'",
".",
"format",
"(",
"stock",
"=",
"stock",
",",
"venue",
"=",
"self",
".",
"venue",
",",
"account",
"=",
"self",
".",
"account",
",",
")",
"url",
"=",
"urljoin",
"(",
"self",
".",
"base_url",
",",
"url_fragment",
")",
"return",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
".",
"json",
"(",
")"
] | 37.916667 | 17.583333 |
def get_group_dns(self):
"""
Returns a (cached) set of the distinguished names in self._group_infos.
"""
if self._group_dns is None:
group_infos = self._get_group_infos()
self._group_dns = set(group_info[0] for group_info in group_infos)
return self._group_dns
|
[
"def",
"get_group_dns",
"(",
"self",
")",
":",
"if",
"self",
".",
"_group_dns",
"is",
"None",
":",
"group_infos",
"=",
"self",
".",
"_get_group_infos",
"(",
")",
"self",
".",
"_group_dns",
"=",
"set",
"(",
"group_info",
"[",
"0",
"]",
"for",
"group_info",
"in",
"group_infos",
")",
"return",
"self",
".",
"_group_dns"
] | 35.222222 | 17.444444 |
def apply(self, styles=None, verbose=False):
"""
Applies the specified style to the selected views and returns the
SUIDs of the affected views.
:param styles (string): Name of Style to be applied to the selected
views. = ['Directed', 'BioPAX_SIF', 'Bridging Reads Histogram:unique_0',
'PSIMI 25 Style', 'Coverage Histogram:best&unique', 'Minimal',
'Bridging Reads Histogram:best&unique_0', 'Coverage Histogram_0',
'Big Labels', 'No Histogram:best&unique_0', 'Bridging Reads Histogram:best',
'No Histogram_0', 'No Histogram:best&unique', 'Bridging Reads Histogram_0',
'Ripple', 'Coverage Histogram:unique_0', 'Nested Network Style',
'Coverage Histogram:best', 'Coverage Histogram:best&unique_0',
'default black', 'No Histogram:best_0', 'No Histogram:unique',
'No Histogram:unique_0', 'Solid', 'Bridging Reads Histogram:unique',
'No Histogram:best', 'Coverage Histogram', 'BioPAX', 'Bridging Reads Histogram',
'Coverage Histogram:best_0', 'Sample1', 'Universe', 'Bridging Reads Histogram:best_0',
'Coverage Histogram:unique', 'Bridging Reads Histogram:best&unique',
'No Histogram', 'default']
:param verbose: print more
:returns: SUIDs of the affected views
"""
PARAMS=set_param(["styles"],[styles])
response=api(url=self.__url+"/apply", PARAMS=PARAMS, method="POST", verbose=verbose)
return response
|
[
"def",
"apply",
"(",
"self",
",",
"styles",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"PARAMS",
"=",
"set_param",
"(",
"[",
"\"styles\"",
"]",
",",
"[",
"styles",
"]",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"__url",
"+",
"\"/apply\"",
",",
"PARAMS",
"=",
"PARAMS",
",",
"method",
"=",
"\"POST\"",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | 55.962963 | 29.148148 |
def init_argparser_working_dir(
self, argparser,
explanation='',
help_template=(
'the working directory; %(explanation)s'
'default is current working directory (%(cwd)s)'),
):
"""
Subclasses could add an extra explanation on how this is used.
Arguments
explanation
Explanation text for the default help template
help_template
A standard help message for this option.
"""
cwd = self.toolchain.join_cwd()
argparser.add_argument(
'--working-dir', dest=WORKING_DIR,
metavar=metavar(WORKING_DIR),
default=cwd,
help=help_template % {'explanation': explanation, 'cwd': cwd},
)
|
[
"def",
"init_argparser_working_dir",
"(",
"self",
",",
"argparser",
",",
"explanation",
"=",
"''",
",",
"help_template",
"=",
"(",
"'the working directory; %(explanation)s'",
"'default is current working directory (%(cwd)s)'",
")",
",",
")",
":",
"cwd",
"=",
"self",
".",
"toolchain",
".",
"join_cwd",
"(",
")",
"argparser",
".",
"add_argument",
"(",
"'--working-dir'",
",",
"dest",
"=",
"WORKING_DIR",
",",
"metavar",
"=",
"metavar",
"(",
"WORKING_DIR",
")",
",",
"default",
"=",
"cwd",
",",
"help",
"=",
"help_template",
"%",
"{",
"'explanation'",
":",
"explanation",
",",
"'cwd'",
":",
"cwd",
"}",
",",
")"
] | 30.6 | 17.96 |
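
A self-contained sketch of the same argparse pattern; `WORKING_DIR` and `metavar()` here are hypothetical stand-ins for the toolchain's own constant and helper.

```python
import argparse
import os

WORKING_DIR = "working_dir"          # hypothetical stand-in for the record's constant

def metavar(name):                   # hypothetical stand-in for the record's helper
    return name.upper()

parser = argparse.ArgumentParser()
cwd = os.getcwd()
parser.add_argument(
    "--working-dir", dest=WORKING_DIR,
    metavar=metavar(WORKING_DIR),
    default=cwd,
    help="the working directory; default is current working directory (%s)" % cwd,
)
args = parser.parse_args([])          # empty argv, so the default applies
print(getattr(args, WORKING_DIR))     # -> the current working directory
```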
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'link') and self.link is not None:
_dict['link'] = self.link
return _dict
|
[
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'link'",
")",
"and",
"self",
".",
"link",
"is",
"not",
"None",
":",
"_dict",
"[",
"'link'",
"]",
"=",
"self",
".",
"link",
"return",
"_dict"
] | 36 | 14.166667 |
def extend_array(edges, binsz, lo, hi):
"""Extend an array to encompass lo and hi values."""
numlo = int(np.ceil((edges[0] - lo) / binsz))
numhi = int(np.ceil((hi - edges[-1]) / binsz))
edges = copy.deepcopy(edges)
if numlo > 0:
edges_lo = np.linspace(edges[0] - numlo * binsz, edges[0], numlo + 1)
edges = np.concatenate((edges_lo[:-1], edges))
if numhi > 0:
edges_hi = np.linspace(edges[-1], edges[-1] + numhi * binsz, numhi + 1)
edges = np.concatenate((edges, edges_hi[1:]))
return edges
|
[
"def",
"extend_array",
"(",
"edges",
",",
"binsz",
",",
"lo",
",",
"hi",
")",
":",
"numlo",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"edges",
"[",
"0",
"]",
"-",
"lo",
")",
"/",
"binsz",
")",
")",
"numhi",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"hi",
"-",
"edges",
"[",
"-",
"1",
"]",
")",
"/",
"binsz",
")",
")",
"edges",
"=",
"copy",
".",
"deepcopy",
"(",
"edges",
")",
"if",
"numlo",
">",
"0",
":",
"edges_lo",
"=",
"np",
".",
"linspace",
"(",
"edges",
"[",
"0",
"]",
"-",
"numlo",
"*",
"binsz",
",",
"edges",
"[",
"0",
"]",
",",
"numlo",
"+",
"1",
")",
"edges",
"=",
"np",
".",
"concatenate",
"(",
"(",
"edges_lo",
"[",
":",
"-",
"1",
"]",
",",
"edges",
")",
")",
"if",
"numhi",
">",
"0",
":",
"edges_hi",
"=",
"np",
".",
"linspace",
"(",
"edges",
"[",
"-",
"1",
"]",
",",
"edges",
"[",
"-",
"1",
"]",
"+",
"numhi",
"*",
"binsz",
",",
"numhi",
"+",
"1",
")",
"edges",
"=",
"np",
".",
"concatenate",
"(",
"(",
"edges",
",",
"edges_hi",
"[",
"1",
":",
"]",
")",
")",
"return",
"edges"
] | 33.6875 | 22.5625 |
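
A quick worked example of the padding logic, assuming `extend_array` from the record (and its `numpy`/`copy` imports) is in scope: two bins are prepended and two appended to cover [0, 6].

```python
import numpy as np

edges = np.array([2.0, 3.0, 4.0])
# numlo = ceil((2 - 0) / 1) = 2, numhi = ceil((6 - 4) / 1) = 2
print(extend_array(edges, binsz=1.0, lo=0.0, hi=6.0))
# -> [0. 1. 2. 3. 4. 5. 6.]
```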
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
|
[
"def",
"duplicates_removed",
"(",
"it",
",",
"already_seen",
"=",
"(",
")",
")",
":",
"lst",
"=",
"[",
"]",
"seen",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"it",
":",
"if",
"i",
"in",
"seen",
"or",
"i",
"in",
"already_seen",
":",
"continue",
"lst",
".",
"append",
"(",
"i",
")",
"seen",
".",
"add",
"(",
"i",
")",
"return",
"lst"
] | 21.928571 | 17.928571 |
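
Usage is straightforward, assuming `duplicates_removed` from the record is in scope; note that `already_seen` filters values out without adding them to the output.

```python
print(duplicates_removed([3, 1, 3, 2, 1]))                 # [3, 1, 2]
print(duplicates_removed([3, 1, 3, 2], already_seen={1}))  # [3, 2]
```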
def getPath(self, suffix=None, extension="jar", separator=os.sep):
"""
Returns the full path, relative to the root of a Maven repository,
of the current artifact, using Maven's conventions.
In particular, it will be:
<groupId with "." replaced by <separator>>[<separator><artifactId><separator>[<version><separator><basename obtained via getFileName()>]]
By default, <separator>=os.sep
"""
assert (self._groupId is not None)
resultComponents = [
self._groupId.replace(".", separator)
]
if self._artifactId is not None:
resultComponents.append(self._artifactId)
version = self._version
if version is not None:
resultComponents.append(version.getRawString())
resultComponents.append(self.getFileName(suffix, extension))
return separator.join(resultComponents)
|
[
"def",
"getPath",
"(",
"self",
",",
"suffix",
"=",
"None",
",",
"extension",
"=",
"\"jar\"",
",",
"separator",
"=",
"os",
".",
"sep",
")",
":",
"assert",
"(",
"self",
".",
"_groupId",
"is",
"not",
"None",
")",
"resultComponents",
"=",
"[",
"self",
".",
"_groupId",
".",
"replace",
"(",
"\".\"",
",",
"separator",
")",
"]",
"if",
"self",
".",
"_artifactId",
"is",
"not",
"None",
":",
"resultComponents",
".",
"append",
"(",
"self",
".",
"_artifactId",
")",
"version",
"=",
"self",
".",
"_version",
"if",
"version",
"is",
"not",
"None",
":",
"resultComponents",
".",
"append",
"(",
"version",
".",
"getRawString",
"(",
")",
")",
"resultComponents",
".",
"append",
"(",
"self",
".",
"getFileName",
"(",
"suffix",
",",
"extension",
")",
")",
"return",
"separator",
".",
"join",
"(",
"resultComponents",
")"
] | 34.037037 | 24.407407 |
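
A stand-alone sketch of the same path construction, with hypothetical function and argument names; it reproduces the nesting rule from the docstring.

```python
import os

def maven_path(group_id, artifact_id=None, version=None, file_name=None, sep=os.sep):
    # Group dots become separators; artifactId, then version and file name,
    # are appended only when the preceding component is present.
    parts = [group_id.replace(".", sep)]
    if artifact_id is not None:
        parts.append(artifact_id)
        if version is not None:
            parts.extend([version, file_name])
    return sep.join(parts)

print(maven_path("org.apache.commons", "commons-lang3", "3.12.0",
                 "commons-lang3-3.12.0.jar", sep="/"))
# -> org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar
```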
def check_arguments(cls, conf):
"""
Callback to perform sanity checking for the plugin's specific
parameters.
"""
# Perform sanity checking on CIDR
utils.ip_check(conf['fixed_cidr'], netmask_expected=True)
# Perform sanity checking on host list
for host in conf['fixed_hosts'].split(":"):
utils.ip_check(host)
|
[
"def",
"check_arguments",
"(",
"cls",
",",
"conf",
")",
":",
"# Perform sanity checking on CIDR",
"utils",
".",
"ip_check",
"(",
"conf",
"[",
"'fixed_cidr'",
"]",
",",
"netmask_expected",
"=",
"True",
")",
"# Perform sanity checking on host list",
"for",
"host",
"in",
"conf",
"[",
"'fixed_hosts'",
"]",
".",
"split",
"(",
"\":\"",
")",
":",
"utils",
".",
"ip_check",
"(",
"host",
")"
] | 31.333333 | 15.833333 |
def save(self):
"""This function is called by the parent dialog window when the user selects to save the settings."""
if self.path is None: # Delete requested, so remove the current path from sys.path, if present
if self.config_manager.userCodeDir is not None:
sys.path.remove(self.config_manager.userCodeDir)
self.config_manager.userCodeDir = None
logger.info("Removed custom module search path from configuration and sys.path.")
else:
if self.path != self.config_manager.userCodeDir:
if self.config_manager.userCodeDir is not None:
sys.path.remove(self.config_manager.userCodeDir)
sys.path.append(self.path)
self.config_manager.userCodeDir = self.path
logger.info("Saved custom module search path and added it to sys.path: {}".format(self.path))
|
[
"def",
"save",
"(",
"self",
")",
":",
"if",
"self",
".",
"path",
"is",
"None",
":",
"# Delete requested, so remove the current path from sys.path, if present",
"if",
"self",
".",
"config_manager",
".",
"userCodeDir",
"is",
"not",
"None",
":",
"sys",
".",
"path",
".",
"remove",
"(",
"self",
".",
"config_manager",
".",
"userCodeDir",
")",
"self",
".",
"config_manager",
".",
"userCodeDir",
"=",
"None",
"logger",
".",
"info",
"(",
"\"Removed custom module search path from configuration and sys.path.\"",
")",
"else",
":",
"if",
"self",
".",
"path",
"!=",
"self",
".",
"config_manager",
".",
"userCodeDir",
":",
"if",
"self",
".",
"config_manager",
".",
"userCodeDir",
"is",
"not",
"None",
":",
"sys",
".",
"path",
".",
"remove",
"(",
"self",
".",
"config_manager",
".",
"userCodeDir",
")",
"sys",
".",
"path",
".",
"append",
"(",
"self",
".",
"path",
")",
"self",
".",
"config_manager",
".",
"userCodeDir",
"=",
"self",
".",
"path",
"logger",
".",
"info",
"(",
"\"Saved custom module search path and added it to sys.path: {}\"",
".",
"format",
"(",
"self",
".",
"path",
")",
")"
] | 61 | 28.666667 |
def TreeCollectionStore(repos_dict=None,
repos_par=None,
with_caching=True,
assumed_doc_version=None,
git_ssh=None,
pkey=None,
git_action_class=TreeCollectionsGitAction,
mirror_info=None,
infrastructure_commit_author='OpenTree API <[email protected]>'):
"""Factory function for a _TreeCollectionStore object.
A wrapper around the _TreeCollectionStore class instantiation for
the most common use case: a singleton _TreeCollectionStore.
If you need distinct _TreeCollectionStore objects, you'll need to
call that class directly.
"""
global _THE_TREE_COLLECTION_STORE
if _THE_TREE_COLLECTION_STORE is None:
_THE_TREE_COLLECTION_STORE = _TreeCollectionStore(repos_dict=repos_dict,
repos_par=repos_par,
with_caching=with_caching,
assumed_doc_version=assumed_doc_version,
git_ssh=git_ssh,
pkey=pkey,
git_action_class=git_action_class,
mirror_info=mirror_info,
infrastructure_commit_author=infrastructure_commit_author)
return _THE_TREE_COLLECTION_STORE
|
[
"def",
"TreeCollectionStore",
"(",
"repos_dict",
"=",
"None",
",",
"repos_par",
"=",
"None",
",",
"with_caching",
"=",
"True",
",",
"assumed_doc_version",
"=",
"None",
",",
"git_ssh",
"=",
"None",
",",
"pkey",
"=",
"None",
",",
"git_action_class",
"=",
"TreeCollectionsGitAction",
",",
"mirror_info",
"=",
"None",
",",
"infrastructure_commit_author",
"=",
"'OpenTree API <[email protected]>'",
")",
":",
"global",
"_THE_TREE_COLLECTION_STORE",
"if",
"_THE_TREE_COLLECTION_STORE",
"is",
"None",
":",
"_THE_TREE_COLLECTION_STORE",
"=",
"_TreeCollectionStore",
"(",
"repos_dict",
"=",
"repos_dict",
",",
"repos_par",
"=",
"repos_par",
",",
"with_caching",
"=",
"with_caching",
",",
"assumed_doc_version",
"=",
"assumed_doc_version",
",",
"git_ssh",
"=",
"git_ssh",
",",
"pkey",
"=",
"pkey",
",",
"git_action_class",
"=",
"git_action_class",
",",
"mirror_info",
"=",
"mirror_info",
",",
"infrastructure_commit_author",
"=",
"infrastructure_commit_author",
")",
"return",
"_THE_TREE_COLLECTION_STORE"
] | 58.035714 | 23.357143 |
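
The record is an instance of the module-level singleton factory pattern; a generic sketch with a hypothetical `_Store` class shows the shape, along with its caveat that arguments on later calls are silently ignored.

```python
class _Store:                     # hypothetical stand-in for _TreeCollectionStore
    def __init__(self, **config):
        self.config = config

_THE_STORE = None

def Store(**config):
    """Return the shared _Store, creating it on first use."""
    global _THE_STORE
    if _THE_STORE is None:
        _THE_STORE = _Store(**config)
    return _THE_STORE

assert Store(a=1) is Store(a=2)   # the second call's kwargs never reach __init__
```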
def set_value(self, value):
"""The value have to be in the form '10px' or '10%', so numeric value plus measure unit
"""
v = 0
measure_unit = 'px'
try:
v = int(float(value.replace('px', '')))
except ValueError:
try:
v = int(float(value.replace('%', '')))
measure_unit = '%'
except ValueError:
pass
self.numInput.set_value(v)
self.dropMeasureUnit.set_value(measure_unit)
|
[
"def",
"set_value",
"(",
"self",
",",
"value",
")",
":",
"v",
"=",
"0",
"measure_unit",
"=",
"'px'",
"try",
":",
"v",
"=",
"int",
"(",
"float",
"(",
"value",
".",
"replace",
"(",
"'px'",
",",
"''",
")",
")",
")",
"except",
"ValueError",
":",
"try",
":",
"v",
"=",
"int",
"(",
"float",
"(",
"value",
".",
"replace",
"(",
"'%'",
",",
"''",
")",
")",
")",
"measure_unit",
"=",
"'%'",
"except",
"ValueError",
":",
"pass",
"self",
".",
"numInput",
".",
"set_value",
"(",
"v",
")",
"self",
".",
"dropMeasureUnit",
".",
"set_value",
"(",
"measure_unit",
")"
] | 33.466667 | 13.2 |
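
The same parsing logic as a stand-alone helper, assuming nothing beyond the record: try stripping 'px' first, fall back to '%', and keep the defaults (0, 'px') when neither form parses.

```python
def split_value(value):
    for unit in ("px", "%"):
        try:
            return int(float(value.replace(unit, ""))), unit
        except ValueError:
            continue
    return 0, "px"  # the record's defaults when parsing fails

print(split_value("10px"))   # (10, 'px')
print(split_value("12.5%"))  # (12, '%')
print(split_value("auto"))   # (0, 'px')
```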
def addFilter(self, filterclass):
"""Add a filter class to the parser."""
if filterclass not in self.filters:
self.filters.append(filterclass)
|
[
"def",
"addFilter",
"(",
"self",
",",
"filterclass",
")",
":",
"if",
"filterclass",
"not",
"in",
"self",
".",
"filters",
":",
"self",
".",
"filters",
".",
"append",
"(",
"filterclass",
")"
] | 41.75 | 3.5 |
def current_timestamp():
"""Returns current time as ISO8601 formatted string in the Zulu TZ"""
now = datetime.utcnow()
timestamp = now.isoformat()[0:19] + 'Z'
debug("generated timestamp: {now}".format(now=timestamp))
return timestamp
|
[
"def",
"current_timestamp",
"(",
")",
":",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"timestamp",
"=",
"now",
".",
"isoformat",
"(",
")",
"[",
"0",
":",
"19",
"]",
"+",
"'Z'",
"debug",
"(",
"\"generated timestamp: {now}\"",
".",
"format",
"(",
"now",
"=",
"timestamp",
")",
")",
"return",
"timestamp"
] | 31 | 19.125 |
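
Equivalent inline, for reference: truncating `isoformat()` at 19 characters drops the microseconds, and the appended 'Z' marks the naive UTC time as Zulu.

```python
from datetime import datetime

stamp = datetime.utcnow().isoformat()[0:19] + "Z"
print(stamp)  # e.g. 2021-06-01T12:34:56Z
```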
def get_child_repositories(self, repository_id):
"""Gets the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.repository.RepositoryList) - the children of the
repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bins
if self._catalog_session is not None:
return self._catalog_session.get_child_catalogs(catalog_id=repository_id)
return RepositoryLookupSession(
self._proxy,
self._runtime).get_repositories_by_ids(
list(self.get_child_repository_ids(repository_id)))
|
[
"def",
"get_child_repositories",
"(",
"self",
",",
"repository_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchySession.get_child_bins",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"get_child_catalogs",
"(",
"catalog_id",
"=",
"repository_id",
")",
"return",
"RepositoryLookupSession",
"(",
"self",
".",
"_proxy",
",",
"self",
".",
"_runtime",
")",
".",
"get_repositories_by_ids",
"(",
"list",
"(",
"self",
".",
"get_child_repository_ids",
"(",
"repository_id",
")",
")",
")"
] | 46.47619 | 17.904762 |
def area4info():
"""
Get some info about the package.
:return: Package info
:rtype: str
"""
# Info variables:
name = "area4"
author = "https://github.com/RDIL"
author_email = rdillib.get_email()
description = "Dividers in Python, the easy way!"
return "{0}: {1}\n{2}: {3}\n{4}: {5}\n{6}: {7}".format(
"Name:", name,
"Author:", author,
"Author Email:", author_email,
"Description:", description
)
|
[
"def",
"area4info",
"(",
")",
":",
"# Info variables:",
"name",
"=",
"\"area4\"",
"author",
"=",
"\"https://github.com/RDIL\"",
"author_email",
"=",
"rdillib",
".",
"get_email",
"(",
")",
"description",
"=",
"\"Dividers in Python, the easy way!\"",
"return",
"\"{0}: {1}\\n{2}: {3}\\n{4}: {5}\\n{6}: {7}\"",
".",
"format",
"(",
"\"Name:\"",
",",
"name",
",",
"\"Author:\"",
",",
"author",
",",
"\"Author Email:\"",
",",
"author_email",
",",
"\"Description:\"",
",",
"description",
")"
] | 25.5 | 14.388889 |
async def process_callback_result(self, callback_result):
"""
        Dispatch the appropriate processing for an invalid callback result.
        :param callback_result: the invalid callback result to handle
:return:
"""
callback_result_name = type(callback_result).__name__
process_func_name = self.callback_result_map.get(
callback_result_name, '')
process_func = getattr(self, process_func_name, None)
if process_func is not None:
await process_func(callback_result)
else:
raise InvalidCallbackResult(
f'<Parse invalid callback result type: {callback_result_name}>'
)
|
[
"async",
"def",
"process_callback_result",
"(",
"self",
",",
"callback_result",
")",
":",
"callback_result_name",
"=",
"type",
"(",
"callback_result",
")",
".",
"__name__",
"process_func_name",
"=",
"self",
".",
"callback_result_map",
".",
"get",
"(",
"callback_result_name",
",",
"''",
")",
"process_func",
"=",
"getattr",
"(",
"self",
",",
"process_func_name",
",",
"None",
")",
"if",
"process_func",
"is",
"not",
"None",
":",
"await",
"process_func",
"(",
"callback_result",
")",
"else",
":",
"raise",
"InvalidCallbackResult",
"(",
"f'<Parse invalid callback result type: {callback_result_name}>'",
")"
] | 38.9375 | 15.6875 |
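
The record dispatches on the result's type name through a lookup table; a minimal synchronous sketch of the same pattern, with hypothetical class and handler names:

```python
class RetryRequest:               # stands in for one callback-result type
    pass

class Dispatcher:
    callback_result_map = {"RetryRequest": "process_retry"}

    def process_retry(self, result):
        print("retrying:", result)

    def process(self, result):
        # Map the result's type name to a method name, then resolve it.
        name = self.callback_result_map.get(type(result).__name__, "")
        func = getattr(self, name, None)
        if func is None:
            raise ValueError(f"no handler for {type(result).__name__}")
        func(result)

Dispatcher().process(RetryRequest())
```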
def recode (inlist,listmap,cols=None):
"""
    Changes the values in a list to a new set of values (useful when
    you need to recode data from, e.g., strings to numbers). cols defaults
to None (meaning all columns are recoded).
Usage: recode (inlist,listmap,cols=None) cols=recode cols, listmap=2D list
Returns: inlist with the appropriate values replaced with new ones
"""
lst = copy.deepcopy(inlist)
if cols != None:
if type(cols) not in [ListType,TupleType]:
cols = [cols]
for col in cols:
for row in range(len(lst)):
try:
idx = colex(listmap,0).index(lst[row][col])
lst[row][col] = listmap[idx][1]
except ValueError:
pass
else:
for row in range(len(lst)):
for col in range(len(lst)):
try:
idx = colex(listmap,0).index(lst[row][col])
lst[row][col] = listmap[idx][1]
except ValueError:
pass
return lst
|
[
"def",
"recode",
"(",
"inlist",
",",
"listmap",
",",
"cols",
"=",
"None",
")",
":",
"lst",
"=",
"copy",
".",
"deepcopy",
"(",
"inlist",
")",
"if",
"cols",
"!=",
"None",
":",
"if",
"type",
"(",
"cols",
")",
"not",
"in",
"[",
"ListType",
",",
"TupleType",
"]",
":",
"cols",
"=",
"[",
"cols",
"]",
"for",
"col",
"in",
"cols",
":",
"for",
"row",
"in",
"range",
"(",
"len",
"(",
"lst",
")",
")",
":",
"try",
":",
"idx",
"=",
"colex",
"(",
"listmap",
",",
"0",
")",
".",
"index",
"(",
"lst",
"[",
"row",
"]",
"[",
"col",
"]",
")",
"lst",
"[",
"row",
"]",
"[",
"col",
"]",
"=",
"listmap",
"[",
"idx",
"]",
"[",
"1",
"]",
"except",
"ValueError",
":",
"pass",
"else",
":",
"for",
"row",
"in",
"range",
"(",
"len",
"(",
"lst",
")",
")",
":",
"for",
"col",
"in",
"range",
"(",
"len",
"(",
"lst",
")",
")",
":",
"try",
":",
"idx",
"=",
"colex",
"(",
"listmap",
",",
"0",
")",
".",
"index",
"(",
"lst",
"[",
"row",
"]",
"[",
"col",
"]",
")",
"lst",
"[",
"row",
"]",
"[",
"col",
"]",
"=",
"listmap",
"[",
"idx",
"]",
"[",
"1",
"]",
"except",
"ValueError",
":",
"pass",
"return",
"lst"
] | 35.758621 | 15.413793 |
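
Example usage, assuming `recode` and the module's `colex()` column-extraction helper it calls are both in scope; `listmap` rows are (old, new) pairs, and `cols=1` restricts recoding to the second column.

```python
data = [["a", "yes"], ["b", "no"], ["c", "yes"]]
mapping = [["yes", 1], ["no", 0]]
print(recode(data, mapping, cols=1))
# -> [['a', 1], ['b', 0], ['c', 1]]
```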
def lower_ir(ir_blocks, query_metadata_table, type_equivalence_hints=None):
"""Lower the IR blocks into a form that can be represented by a SQL query.
Args:
ir_blocks: list of IR blocks to lower into SQL-compatible form
query_metadata_table: QueryMetadataTable object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
tree representation of IR blocks for recursive traversal by SQL backend.
"""
_validate_all_blocks_supported(ir_blocks, query_metadata_table)
construct_result = _get_construct_result(ir_blocks)
query_path_to_location_info = _map_query_path_to_location_info(query_metadata_table)
query_path_to_output_fields = _map_query_path_to_outputs(
construct_result, query_path_to_location_info)
block_index_to_location = _map_block_index_to_location(ir_blocks)
# perform lowering steps
ir_blocks = lower_unary_transformations(ir_blocks)
ir_blocks = lower_unsupported_metafield_expressions(ir_blocks)
# iteratively construct SqlTree
query_path_to_node = {}
query_path_to_filters = {}
tree_root = None
for index, block in enumerate(ir_blocks):
if isinstance(block, constants.SKIPPABLE_BLOCK_TYPES):
continue
location = block_index_to_location[index]
if isinstance(block, (blocks.QueryRoot,)):
query_path = location.query_path
if tree_root is not None:
raise AssertionError(
u'Encountered QueryRoot {} but tree root is already set to {} during '
u'construction of SQL query tree for IR blocks {} with query '
u'metadata table {}'.format(
block, tree_root, ir_blocks, query_metadata_table))
tree_root = SqlNode(block=block, query_path=query_path)
query_path_to_node[query_path] = tree_root
elif isinstance(block, blocks.Filter):
query_path_to_filters.setdefault(query_path, []).append(block)
else:
raise AssertionError(
u'Unsupported block {} unexpectedly passed validation for IR blocks '
u'{} with query metadata table {} .'.format(block, ir_blocks, query_metadata_table))
return SqlQueryTree(tree_root, query_path_to_location_info, query_path_to_output_fields,
query_path_to_filters, query_path_to_node)
|
[
"def",
"lower_ir",
"(",
"ir_blocks",
",",
"query_metadata_table",
",",
"type_equivalence_hints",
"=",
"None",
")",
":",
"_validate_all_blocks_supported",
"(",
"ir_blocks",
",",
"query_metadata_table",
")",
"construct_result",
"=",
"_get_construct_result",
"(",
"ir_blocks",
")",
"query_path_to_location_info",
"=",
"_map_query_path_to_location_info",
"(",
"query_metadata_table",
")",
"query_path_to_output_fields",
"=",
"_map_query_path_to_outputs",
"(",
"construct_result",
",",
"query_path_to_location_info",
")",
"block_index_to_location",
"=",
"_map_block_index_to_location",
"(",
"ir_blocks",
")",
"# perform lowering steps",
"ir_blocks",
"=",
"lower_unary_transformations",
"(",
"ir_blocks",
")",
"ir_blocks",
"=",
"lower_unsupported_metafield_expressions",
"(",
"ir_blocks",
")",
"# iteratively construct SqlTree",
"query_path_to_node",
"=",
"{",
"}",
"query_path_to_filters",
"=",
"{",
"}",
"tree_root",
"=",
"None",
"for",
"index",
",",
"block",
"in",
"enumerate",
"(",
"ir_blocks",
")",
":",
"if",
"isinstance",
"(",
"block",
",",
"constants",
".",
"SKIPPABLE_BLOCK_TYPES",
")",
":",
"continue",
"location",
"=",
"block_index_to_location",
"[",
"index",
"]",
"if",
"isinstance",
"(",
"block",
",",
"(",
"blocks",
".",
"QueryRoot",
",",
")",
")",
":",
"query_path",
"=",
"location",
".",
"query_path",
"if",
"tree_root",
"is",
"not",
"None",
":",
"raise",
"AssertionError",
"(",
"u'Encountered QueryRoot {} but tree root is already set to {} during '",
"u'construction of SQL query tree for IR blocks {} with query '",
"u'metadata table {}'",
".",
"format",
"(",
"block",
",",
"tree_root",
",",
"ir_blocks",
",",
"query_metadata_table",
")",
")",
"tree_root",
"=",
"SqlNode",
"(",
"block",
"=",
"block",
",",
"query_path",
"=",
"query_path",
")",
"query_path_to_node",
"[",
"query_path",
"]",
"=",
"tree_root",
"elif",
"isinstance",
"(",
"block",
",",
"blocks",
".",
"Filter",
")",
":",
"query_path_to_filters",
".",
"setdefault",
"(",
"query_path",
",",
"[",
"]",
")",
".",
"append",
"(",
"block",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"u'Unsupported block {} unexpectedly passed validation for IR blocks '",
"u'{} with query metadata table {} .'",
".",
"format",
"(",
"block",
",",
"ir_blocks",
",",
"query_metadata_table",
")",
")",
"return",
"SqlQueryTree",
"(",
"tree_root",
",",
"query_path_to_location_info",
",",
"query_path_to_output_fields",
",",
"query_path_to_filters",
",",
"query_path_to_node",
")"
] | 57.738462 | 29.507692 |
def compute_bounds(feed: "Feed") -> Tuple:
"""
    Return the tuple (min longitude, min latitude, max longitude,
    max latitude), computed across all of the Feed's stop
    coordinates.
"""
lons, lats = feed.stops["stop_lon"], feed.stops["stop_lat"]
return lons.min(), lats.min(), lons.max(), lats.max()
|
[
"def",
"compute_bounds",
"(",
"feed",
":",
"\"Feed\"",
")",
"->",
"Tuple",
":",
"lons",
",",
"lats",
"=",
"feed",
".",
"stops",
"[",
"\"stop_lon\"",
"]",
",",
"feed",
".",
"stops",
"[",
"\"stop_lat\"",
"]",
"return",
"lons",
".",
"min",
"(",
")",
",",
"lats",
".",
"min",
"(",
")",
",",
"lons",
".",
"max",
"(",
")",
",",
"lats",
".",
"max",
"(",
")"
] | 42.5 | 12.75 |
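
A quick check with a hypothetical minimal `Feed` stand-in that carries only the `stops` frame the function reads:

```python
import pandas as pd

class Feed:                      # hypothetical: just enough for compute_bounds
    def __init__(self, stops):
        self.stops = stops

feed = Feed(pd.DataFrame({
    "stop_lon": [-122.4, -122.3, -122.5],
    "stop_lat": [37.7, 37.8, 37.6],
}))
print(compute_bounds(feed))      # (-122.5, 37.6, -122.3, 37.8)
```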
def handle(self, type: str, *, kwargs: dict = None) -> Callable:
"""
Register an event handler with the :obj:`Layabout` instance.
Args:
type: The name of a Slack RTM API event to be handled. As a
special case, although it is not a proper RTM event, ``*`` may
be provided to handle all events. For more information about
available events see the
`Slack RTM API <https://api.slack.com/rtm>`_.
kwargs: Optional arbitrary keyword arguments passed to the event
handler when the event is triggered.
Returns:
A decorator that validates and registers a Layabout event handler.
Raises:
TypeError: If the decorated :obj:`Callable`'s signature does not
accept at least 2 parameters.
"""
def decorator(fn: Callable) -> Callable:
# Validate that the wrapped callable is a suitable event handler.
sig = signature(fn)
num_params = len(sig.parameters)
if num_params < 2:
raise TypeError(_format_parameter_error_message(
fn.__name__, sig, num_params))
# Register a tuple of the callable and its kwargs, if any.
self._handlers[type].append((fn, kwargs or {}))
return fn
return decorator
|
[
"def",
"handle",
"(",
"self",
",",
"type",
":",
"str",
",",
"*",
",",
"kwargs",
":",
"dict",
"=",
"None",
")",
"->",
"Callable",
":",
"def",
"decorator",
"(",
"fn",
":",
"Callable",
")",
"->",
"Callable",
":",
"# Validate that the wrapped callable is a suitable event handler.",
"sig",
"=",
"signature",
"(",
"fn",
")",
"num_params",
"=",
"len",
"(",
"sig",
".",
"parameters",
")",
"if",
"num_params",
"<",
"2",
":",
"raise",
"TypeError",
"(",
"_format_parameter_error_message",
"(",
"fn",
".",
"__name__",
",",
"sig",
",",
"num_params",
")",
")",
"# Register a tuple of the callable and its kwargs, if any.",
"self",
".",
"_handlers",
"[",
"type",
"]",
".",
"append",
"(",
"(",
"fn",
",",
"kwargs",
"or",
"{",
"}",
")",
")",
"return",
"fn",
"return",
"decorator"
] | 41.484848 | 23.242424 |
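
A trimmed sketch of the same register-and-validate decorator pattern; the `Registry` class and names here are illustrative, not the library's API.

```python
from inspect import signature

class Registry:
    def __init__(self):
        self._handlers = {}

    def handle(self, type_, **kwargs):
        def decorator(fn):
            # Reject handlers that cannot accept (client, event).
            if len(signature(fn).parameters) < 2:
                raise TypeError(f"{fn.__name__} must accept at least 2 parameters")
            self._handlers.setdefault(type_, []).append((fn, kwargs))
            return fn
        return decorator

registry = Registry()

@registry.handle("message", channel="general")
def on_message(client, event):
    print(event)
```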
def get_monitor() -> Optional[resource_pb2_grpc.ResourceMonitorStub]:
"""
Returns the current resource monitoring service client for RPC communications.
"""
monitor = SETTINGS.monitor
if not monitor:
require_test_mode_enabled()
return monitor
|
[
"def",
"get_monitor",
"(",
")",
"->",
"Optional",
"[",
"resource_pb2_grpc",
".",
"ResourceMonitorStub",
"]",
":",
"monitor",
"=",
"SETTINGS",
".",
"monitor",
"if",
"not",
"monitor",
":",
"require_test_mode_enabled",
"(",
")",
"return",
"monitor"
] | 33.375 | 16.125 |
def page_posts(self, page_id, after='', post_type="posts",
include_hidden=False, fields=None, **params):
"""
        :param page_id: ID of the page whose posts to retrieve
        :param after: pagination cursor for the next page of results
        :param post_type: Can be 'posts', 'feed', 'tagged', 'promotable_posts'
        :param include_hidden: whether to include hidden posts
        :param fields: list of field names to request
        :param params: any additional request parameters
        :return: the decoded API response
"""
if fields:
fields = ",".join(fields)
parameters = {"access_token": self.key,
"after": after,
"fields": fields,
"include_hidden": include_hidden}
parameters = self.merge_params(parameters, params)
return self.api_call('%s/%s' % (page_id, post_type), parameters)
|
[
"def",
"page_posts",
"(",
"self",
",",
"page_id",
",",
"after",
"=",
"''",
",",
"post_type",
"=",
"\"posts\"",
",",
"include_hidden",
"=",
"False",
",",
"fields",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"if",
"fields",
":",
"fields",
"=",
"\",\"",
".",
"join",
"(",
"fields",
")",
"parameters",
"=",
"{",
"\"access_token\"",
":",
"self",
".",
"key",
",",
"\"after\"",
":",
"after",
",",
"\"fields\"",
":",
"fields",
",",
"\"include_hidden\"",
":",
"include_hidden",
"}",
"parameters",
"=",
"self",
".",
"merge_params",
"(",
"parameters",
",",
"params",
")",
"return",
"self",
".",
"api_call",
"(",
"'%s/%s'",
"%",
"(",
"page_id",
",",
"post_type",
")",
",",
"parameters",
")"
] | 32.217391 | 19.086957 |
def apply_trapping(self, outlets):
"""
        Apply trapping based on the algorithm described by Y. Masson [1].
        It is applied as a post-process and runs the percolation algorithm in
        reverse, assessing the occupancy of pore neighbors. Consider what
        happens when running standard IP without trapping: 3 situations can
        occur after each invasion step:
The number of defending clusters stays the same and clusters can
shrink
A cluster of size one is suppressed
A cluster is split into multiple clusters
In reverse the following opposite situations can happen:
The number of defending clusters stays the same and clusters can
grow
A cluster of size one is created
        Multiple clusters merge into one cluster
With trapping the reversed rules are adjusted so that:
Only clusters that do not connect to a sink can grow and merge.
At the point that a neighbor connected to a sink is touched the
trapped cluster stops growing as this is the point of trapping in
forward invasion time.
Logger info displays the invasion sequence and pore index and a message
with condition number based on the modified trapping rules and the
assignment of the pore to a given cluster.
Initially all invaded pores are given cluster label -1
Outlets / Sinks are given -2
New clusters that grow into fully trapped clusters are either
identified at the point of breakthrough or grow from nothing if the
full invasion sequence is run, they are assigned numbers from 0 up.
Ref:
[1] Masson, Y., 2016. A fast two-step algorithm for invasion
percolation with trapping. Computers & Geosciences, 90, pp.41-48
Parameters
----------
outlets : list or array of pore indices for defending fluid to escape
through
Returns
-------
Creates a throat array called 'pore.clusters' in the Algorithm
dictionary. Any positive number is a trapped cluster
Also creates 2 boolean arrays Np and Nt long called '<element>.trapped'
"""
# First see if network is fully invaded
net = self.project.network
invaded_ps = self['pore.invasion_sequence'] > -1
if ~np.all(invaded_ps):
# Put defending phase into clusters
clusters = net.find_clusters2(~invaded_ps)
# Identify clusters that are connected to an outlet and set to -2
# -1 is the invaded fluid
# -2 is the defender fluid able to escape
# All others now trapped clusters which grow as invasion is reversed
out_clusters = sp.unique(clusters[outlets])
for c in out_clusters:
if c >= 0:
clusters[clusters == c] = -2
else:
# Go from end
clusters = np.ones(net.Np, dtype=int)*-1
clusters[outlets] = -2
# Turn into a list for indexing
inv_seq = np.vstack((self['pore.invasion_sequence'].astype(int),
np.arange(0, net.Np, dtype=int))).T
# Reverse sort list
inv_seq = inv_seq[inv_seq[:, 0].argsort()][::-1]
next_cluster_num = np.max(clusters)+1
# For all the steps after the inlets are set up to break-through
# Reverse the sequence and assess the neighbors cluster state
stopped_clusters = np.zeros(net.Np, dtype=bool)
all_neighbors = net.find_neighbor_pores(net.pores(), flatten=False,
include_input=True)
for un_seq, pore in inv_seq:
if pore not in outlets and un_seq > 0: # Skip inlets and outlets
nc = clusters[all_neighbors[pore]] # Neighboring clusters
unique_ns = np.unique(nc[nc != -1]) # Unique Neighbors
seq_pore = "S:"+str(un_seq)+" P:"+str(pore)
if np.all(nc == -1):
# This is the start of a new trapped cluster
clusters[pore] = next_cluster_num
next_cluster_num += 1
msg = (seq_pore+" C:1 new cluster number: " +
str(clusters[pore]))
logger.info(msg)
elif len(unique_ns) == 1:
# Grow the only connected neighboring cluster
if not stopped_clusters[unique_ns[0]]:
clusters[pore] = unique_ns[0]
msg = (seq_pore+" C:2 joins cluster number: " +
str(clusters[pore]))
logger.info(msg)
else:
clusters[pore] = -2
elif -2 in unique_ns:
# We have reached a sink neighbor, stop growing cluster
msg = (seq_pore+" C:3 joins sink cluster")
logger.info(msg)
clusters[pore] = -2
# Stop growth and merging
stopped_clusters[unique_ns[unique_ns > -1]] = True
else:
# We might be able to do some merging
# Check if any stopped clusters are neighbors
if np.any(stopped_clusters[unique_ns]):
msg = (seq_pore+" C:4 joins sink cluster")
logger.info(msg)
clusters[pore] = -2
# Stop growing all neighboring clusters
stopped_clusters[unique_ns] = True
else:
# Merge multiple un-stopped trapped clusters
new_num = unique_ns[0]
clusters[pore] = new_num
for c in unique_ns:
clusters[clusters == c] = new_num
msg = (seq_pore + " C:5 merge clusters: " +
str(c) + " into "+str(new_num))
logger.info(msg)
# And now return clusters
self['pore.clusters'] = clusters
logger.info("Number of trapped clusters" +
str(np.sum(np.unique(clusters) >= 0)))
self['pore.trapped'] = self['pore.clusters'] > -1
trapped_ts = net.find_neighbor_throats(self['pore.trapped'])
self['throat.trapped'] = np.zeros([net.Nt], dtype=bool)
self['throat.trapped'][trapped_ts] = True
self['pore.invasion_sequence'][self['pore.trapped']] = -1
self['throat.invasion_sequence'][self['throat.trapped']] = -1
|
[
"def",
"apply_trapping",
"(",
"self",
",",
"outlets",
")",
":",
"# First see if network is fully invaded",
"net",
"=",
"self",
".",
"project",
".",
"network",
"invaded_ps",
"=",
"self",
"[",
"'pore.invasion_sequence'",
"]",
">",
"-",
"1",
"if",
"~",
"np",
".",
"all",
"(",
"invaded_ps",
")",
":",
"# Put defending phase into clusters",
"clusters",
"=",
"net",
".",
"find_clusters2",
"(",
"~",
"invaded_ps",
")",
"# Identify clusters that are connected to an outlet and set to -2",
"# -1 is the invaded fluid",
"# -2 is the defender fluid able to escape",
"# All others now trapped clusters which grow as invasion is reversed",
"out_clusters",
"=",
"sp",
".",
"unique",
"(",
"clusters",
"[",
"outlets",
"]",
")",
"for",
"c",
"in",
"out_clusters",
":",
"if",
"c",
">=",
"0",
":",
"clusters",
"[",
"clusters",
"==",
"c",
"]",
"=",
"-",
"2",
"else",
":",
"# Go from end",
"clusters",
"=",
"np",
".",
"ones",
"(",
"net",
".",
"Np",
",",
"dtype",
"=",
"int",
")",
"*",
"-",
"1",
"clusters",
"[",
"outlets",
"]",
"=",
"-",
"2",
"# Turn into a list for indexing",
"inv_seq",
"=",
"np",
".",
"vstack",
"(",
"(",
"self",
"[",
"'pore.invasion_sequence'",
"]",
".",
"astype",
"(",
"int",
")",
",",
"np",
".",
"arange",
"(",
"0",
",",
"net",
".",
"Np",
",",
"dtype",
"=",
"int",
")",
")",
")",
".",
"T",
"# Reverse sort list",
"inv_seq",
"=",
"inv_seq",
"[",
"inv_seq",
"[",
":",
",",
"0",
"]",
".",
"argsort",
"(",
")",
"]",
"[",
":",
":",
"-",
"1",
"]",
"next_cluster_num",
"=",
"np",
".",
"max",
"(",
"clusters",
")",
"+",
"1",
"# For all the steps after the inlets are set up to break-through",
"# Reverse the sequence and assess the neighbors cluster state",
"stopped_clusters",
"=",
"np",
".",
"zeros",
"(",
"net",
".",
"Np",
",",
"dtype",
"=",
"bool",
")",
"all_neighbors",
"=",
"net",
".",
"find_neighbor_pores",
"(",
"net",
".",
"pores",
"(",
")",
",",
"flatten",
"=",
"False",
",",
"include_input",
"=",
"True",
")",
"for",
"un_seq",
",",
"pore",
"in",
"inv_seq",
":",
"if",
"pore",
"not",
"in",
"outlets",
"and",
"un_seq",
">",
"0",
":",
"# Skip inlets and outlets",
"nc",
"=",
"clusters",
"[",
"all_neighbors",
"[",
"pore",
"]",
"]",
"# Neighboring clusters",
"unique_ns",
"=",
"np",
".",
"unique",
"(",
"nc",
"[",
"nc",
"!=",
"-",
"1",
"]",
")",
"# Unique Neighbors",
"seq_pore",
"=",
"\"S:\"",
"+",
"str",
"(",
"un_seq",
")",
"+",
"\" P:\"",
"+",
"str",
"(",
"pore",
")",
"if",
"np",
".",
"all",
"(",
"nc",
"==",
"-",
"1",
")",
":",
"# This is the start of a new trapped cluster",
"clusters",
"[",
"pore",
"]",
"=",
"next_cluster_num",
"next_cluster_num",
"+=",
"1",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:1 new cluster number: \"",
"+",
"str",
"(",
"clusters",
"[",
"pore",
"]",
")",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"elif",
"len",
"(",
"unique_ns",
")",
"==",
"1",
":",
"# Grow the only connected neighboring cluster",
"if",
"not",
"stopped_clusters",
"[",
"unique_ns",
"[",
"0",
"]",
"]",
":",
"clusters",
"[",
"pore",
"]",
"=",
"unique_ns",
"[",
"0",
"]",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:2 joins cluster number: \"",
"+",
"str",
"(",
"clusters",
"[",
"pore",
"]",
")",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"else",
":",
"clusters",
"[",
"pore",
"]",
"=",
"-",
"2",
"elif",
"-",
"2",
"in",
"unique_ns",
":",
"# We have reached a sink neighbor, stop growing cluster",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:3 joins sink cluster\"",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"clusters",
"[",
"pore",
"]",
"=",
"-",
"2",
"# Stop growth and merging",
"stopped_clusters",
"[",
"unique_ns",
"[",
"unique_ns",
">",
"-",
"1",
"]",
"]",
"=",
"True",
"else",
":",
"# We might be able to do some merging",
"# Check if any stopped clusters are neighbors",
"if",
"np",
".",
"any",
"(",
"stopped_clusters",
"[",
"unique_ns",
"]",
")",
":",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:4 joins sink cluster\"",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"clusters",
"[",
"pore",
"]",
"=",
"-",
"2",
"# Stop growing all neighboring clusters",
"stopped_clusters",
"[",
"unique_ns",
"]",
"=",
"True",
"else",
":",
"# Merge multiple un-stopped trapped clusters",
"new_num",
"=",
"unique_ns",
"[",
"0",
"]",
"clusters",
"[",
"pore",
"]",
"=",
"new_num",
"for",
"c",
"in",
"unique_ns",
":",
"clusters",
"[",
"clusters",
"==",
"c",
"]",
"=",
"new_num",
"msg",
"=",
"(",
"seq_pore",
"+",
"\" C:5 merge clusters: \"",
"+",
"str",
"(",
"c",
")",
"+",
"\" into \"",
"+",
"str",
"(",
"new_num",
")",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"# And now return clusters",
"self",
"[",
"'pore.clusters'",
"]",
"=",
"clusters",
"logger",
".",
"info",
"(",
"\"Number of trapped clusters\"",
"+",
"str",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"unique",
"(",
"clusters",
")",
">=",
"0",
")",
")",
")",
"self",
"[",
"'pore.trapped'",
"]",
"=",
"self",
"[",
"'pore.clusters'",
"]",
">",
"-",
"1",
"trapped_ts",
"=",
"net",
".",
"find_neighbor_throats",
"(",
"self",
"[",
"'pore.trapped'",
"]",
")",
"self",
"[",
"'throat.trapped'",
"]",
"=",
"np",
".",
"zeros",
"(",
"[",
"net",
".",
"Nt",
"]",
",",
"dtype",
"=",
"bool",
")",
"self",
"[",
"'throat.trapped'",
"]",
"[",
"trapped_ts",
"]",
"=",
"True",
"self",
"[",
"'pore.invasion_sequence'",
"]",
"[",
"self",
"[",
"'pore.trapped'",
"]",
"]",
"=",
"-",
"1",
"self",
"[",
"'throat.invasion_sequence'",
"]",
"[",
"self",
"[",
"'throat.trapped'",
"]",
"]",
"=",
"-",
"1"
] | 49.014815 | 19.888889 |
def compare_elements(element1,
element2,
compare_electron_shells_meta=False,
compare_ecp_pots_meta=False,
compare_meta=False,
rel_tol=0.0):
'''
    Determine whether the basis information for two elements is the same
Exponents/coefficients are compared using a tolerance.
Parameters
----------
element1 : dict
Basis information for an element
element2 : dict
Basis information for another element
compare_electron_shells_meta : bool
Compare the metadata of electron shells
compare_ecp_pots_meta : bool
Compare the metadata of ECP potentials
compare_meta : bool
Compare the overall element metadata
rel_tol : float
Maximum relative error that is considered equal
'''
if not _compare_keys(element1, element2, 'electron_shells', electron_shells_are_equal,
compare_electron_shells_meta, rel_tol):
return False
if not _compare_keys(element1, element2, 'ecp_potentials', ecp_pots_are_equal, compare_ecp_pots_meta, rel_tol):
return False
if not _compare_keys(element1, element2, 'ecp_electrons', operator.eq):
return False
if compare_meta:
if not _compare_keys(element1, element2, 'references', operator.eq):
return False
return True
|
[
"def",
"compare_elements",
"(",
"element1",
",",
"element2",
",",
"compare_electron_shells_meta",
"=",
"False",
",",
"compare_ecp_pots_meta",
"=",
"False",
",",
"compare_meta",
"=",
"False",
",",
"rel_tol",
"=",
"0.0",
")",
":",
"if",
"not",
"_compare_keys",
"(",
"element1",
",",
"element2",
",",
"'electron_shells'",
",",
"electron_shells_are_equal",
",",
"compare_electron_shells_meta",
",",
"rel_tol",
")",
":",
"return",
"False",
"if",
"not",
"_compare_keys",
"(",
"element1",
",",
"element2",
",",
"'ecp_potentials'",
",",
"ecp_pots_are_equal",
",",
"compare_ecp_pots_meta",
",",
"rel_tol",
")",
":",
"return",
"False",
"if",
"not",
"_compare_keys",
"(",
"element1",
",",
"element2",
",",
"'ecp_electrons'",
",",
"operator",
".",
"eq",
")",
":",
"return",
"False",
"if",
"compare_meta",
":",
"if",
"not",
"_compare_keys",
"(",
"element1",
",",
"element2",
",",
"'references'",
",",
"operator",
".",
"eq",
")",
":",
"return",
"False",
"return",
"True"
] | 32.952381 | 23.142857 |
def from_file(file_path) -> dict:
""" Load JSON file """
with io.open(file_path, 'r', encoding='utf-8') as json_stream:
return Json.parse(json_stream, True)
|
[
"def",
"from_file",
"(",
"file_path",
")",
"->",
"dict",
":",
"with",
"io",
".",
"open",
"(",
"file_path",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"json_stream",
":",
"return",
"Json",
".",
"parse",
"(",
"json_stream",
",",
"True",
")"
] | 46 | 11.25 |
def networkTwoMode(self, tag1, tag2, directed = False, recordType = True, nodeCount = True, edgeWeight = True, stemmerTag1 = None, stemmerTag2 = None, edgeAttribute = None):
"""Creates a network of the objects found by two WOS tags _tag1_ and _tag2_, each node marked by which tag spawned it making the resultant graph bipartite.
A **networkTwoMode()** looks at each Record in the `RecordCollection` and extracts its values for the tags given by _tag1_ and _tag2_, e.g. the `'WC'` and `'LA'` tags. Then for each object returned by each tag and edge is created between it and every other object of the other tag. So the WOS defined subject tag `'WC'` and language tag `'LA'`, will give a two-mode network showing the connections between subjects and languages. Each node will have an attribute call `'type'` that gives the tag that created it or both if both created it, e.g. the node `'English'` would have the type attribute be `'LA'`.
        The number of times each object occurs is counted if _nodeCount_ is `True` and the edges count the number of co-occurrences if _edgeWeight_ is `True`. Both are `True` by default.
        The _directed_ parameter, if `True`, will cause the network to be directed with the first tag as the source and the second as the destination.
# Parameters
_tag1_ : `str`
> A two character WOS tag or one of the full names for a tag, the source of edges on the graph
        _tag2_ : `str`
> A two character WOS tag or one of the full names for a tag, the target of edges on the graph
_directed_ : `optional [bool]`
> Default `False`, if `True` the returned network is directed
_nodeCount_ : `optional [bool]`
        > Default `True`, if `True` each node will have an attribute called "count" that contains an int giving the number of times the object occurred.
_edgeWeight_ : `optional [bool]`
        > Default `True`, if `True` each edge will have an attribute called "weight" that contains an int giving the number of times the two objects co-occurred.
_stemmerTag1_ : `optional [func]`
        > Default `None`. If _stemmerTag1_ is a callable object (a function, or a class with a `__call__` method), it will be called on the ID of every node given by _tag1_ in the graph; all IDs are strings.
> For example: the function `f = lambda x: x[0]` if given as the stemmer will cause all IDs to be the first character of their unstemmed IDs. e.g. the title `'Goos-Hanchen and Imbert-Fedorov shifts for leaky guided modes'` will create the node `'G'`.
_stemmerTag2_ : `optional [func]`
> Default `None`, see _stemmerTag1_ as it is the same but for _tag2_
# Returns
`networkx Graph or networkx DiGraph`
> A networkx Graph with the objects of the tags _tag1_ and _tag2_ as nodes and their co-occurrences as edges.
"""
if not isinstance(tag1, str):
raise TagError("{} is not a string it cannot be a tag.".format(tag1))
if not isinstance(tag2, str):
raise TagError("{} is not a string it cannot be a tag.".format(tag2))
if stemmerTag1 is not None:
if isinstance(stemmerTag1, collections.abc.Callable):
stemCheck = True
else:
raise TagError("stemmerTag1 must be callable, e.g. a function or class with a __call__ method.")
else:
stemmerTag1 = lambda x: x
if stemmerTag2 is not None:
if isinstance(stemmerTag2, collections.abc.Callable):
stemCheck = True
else:
raise TagError("stemmerTag2 must be callable, e.g. a function or class with a __call__ method.")
else:
stemmerTag2 = lambda x: x
count = 0
progArgs = (0, "Starting to make a two mode network of " + tag1 + " and " + tag2)
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if edgeAttribute is not None:
if directed:
grph = nx.MultiDiGraph()
else:
grph = nx.MultiGraph()
else:
if directed:
grph = nx.DiGraph()
else:
grph = nx.Graph()
for R in self:
if PBar:
count += 1
PBar.updateVal(count / len(self), "Analyzing: {}".format(R))
if edgeAttribute is not None:
edgeVals = R.get(edgeAttribute, [])
if not isinstance(edgeVals, list):
edgeVals = [edgeVals]
contents1 = R.get(tag1)
contents2 = R.get(tag2)
if isinstance(contents1, list):
contents1 = [stemmerTag1(str(v)) for v in contents1]
elif contents1 == None:
contents1 = []
else:
contents1 = [stemmerTag1(str(contents1))]
if isinstance(contents2, list):
contents2 = [stemmerTag2(str(v)) for v in contents2]
elif contents2 == None:
contents2 = []
else:
contents2 = [stemmerTag2(str(contents2))]
for node1 in contents1:
for node2 in contents2:
if edgeAttribute:
for edgeVal in edgeVals:
if grph.has_edge(node1, node2, key = edgeVal):
if edgeWeight:
grph.edges[node1, node2, edgeVal]['weight'] += 1
else:
if edgeWeight:
attrDict = {'key' : edgeVal, 'weight' : 1}
else:
attrDict = {'key' : edgeVal}
grph.add_edge(node1, node2, **attrDict)
elif edgeWeight:
try:
grph.edges[node1, node2]['weight'] += 1
except KeyError:
grph.add_edge(node1, node2, weight = 1)
else:
if not grph.has_edge(node1, node2):
grph.add_edge(node1, node2)
if nodeCount:
try:
grph.node[node1]['count'] += 1
except KeyError:
try:
grph.node[node1]['count'] = 1
if recordType:
grph.node[node1]['type'] = tag1
except KeyError:
if recordType:
grph.add_node(node1, type = tag1)
else:
grph.add_node(node1)
else:
if not grph.has_node(node1):
if recordType:
grph.add_node(node1, type = tag1)
else:
grph.add_node(node1)
elif recordType:
if 'type' not in grph.node[node1]:
grph.node[node1]['type'] = tag1
for node2 in contents2:
if nodeCount:
try:
grph.node[node2]['count'] += 1
except KeyError:
try:
grph.node[node2]['count'] = 1
if recordType:
grph.node[node2]['type'] = tag2
except KeyError:
grph.add_node(node2, count = 1)
if recordType:
grph.node[node2]['type'] = tag2
else:
if not grph.has_node(node2):
if recordType:
grph.add_node(node2, type = tag2)
else:
grph.add_node(node2)
elif recordType:
if 'type' not in grph.node[node2]:
grph.node[node2]['type'] = tag2
if PBar:
PBar.finish("Done making a two mode network of " + tag1 + " and " + tag2)
return grph
|
[
"def",
"networkTwoMode",
"(",
"self",
",",
"tag1",
",",
"tag2",
",",
"directed",
"=",
"False",
",",
"recordType",
"=",
"True",
",",
"nodeCount",
"=",
"True",
",",
"edgeWeight",
"=",
"True",
",",
"stemmerTag1",
"=",
"None",
",",
"stemmerTag2",
"=",
"None",
",",
"edgeAttribute",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"tag1",
",",
"str",
")",
":",
"raise",
"TagError",
"(",
"\"{} is not a string it cannot be a tag.\"",
".",
"format",
"(",
"tag1",
")",
")",
"if",
"not",
"isinstance",
"(",
"tag2",
",",
"str",
")",
":",
"raise",
"TagError",
"(",
"\"{} is not a string it cannot be a tag.\"",
".",
"format",
"(",
"tag2",
")",
")",
"if",
"stemmerTag1",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"stemmerTag1",
",",
"collections",
".",
"abc",
".",
"Callable",
")",
":",
"stemCheck",
"=",
"True",
"else",
":",
"raise",
"TagError",
"(",
"\"stemmerTag1 must be callable, e.g. a function or class with a __call__ method.\"",
")",
"else",
":",
"stemmerTag1",
"=",
"lambda",
"x",
":",
"x",
"if",
"stemmerTag2",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"stemmerTag2",
",",
"collections",
".",
"abc",
".",
"Callable",
")",
":",
"stemCheck",
"=",
"True",
"else",
":",
"raise",
"TagError",
"(",
"\"stemmerTag2 must be callable, e.g. a function or class with a __call__ method.\"",
")",
"else",
":",
"stemmerTag2",
"=",
"lambda",
"x",
":",
"x",
"count",
"=",
"0",
"progArgs",
"=",
"(",
"0",
",",
"\"Starting to make a two mode network of \"",
"+",
"tag1",
"+",
"\" and \"",
"+",
"tag2",
")",
"if",
"metaknowledge",
".",
"VERBOSE_MODE",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"False",
"}",
"else",
":",
"progKwargs",
"=",
"{",
"'dummy'",
":",
"True",
"}",
"with",
"_ProgressBar",
"(",
"*",
"progArgs",
",",
"*",
"*",
"progKwargs",
")",
"as",
"PBar",
":",
"if",
"edgeAttribute",
"is",
"not",
"None",
":",
"if",
"directed",
":",
"grph",
"=",
"nx",
".",
"MultiDiGraph",
"(",
")",
"else",
":",
"grph",
"=",
"nx",
".",
"MultiGraph",
"(",
")",
"else",
":",
"if",
"directed",
":",
"grph",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"else",
":",
"grph",
"=",
"nx",
".",
"Graph",
"(",
")",
"for",
"R",
"in",
"self",
":",
"if",
"PBar",
":",
"count",
"+=",
"1",
"PBar",
".",
"updateVal",
"(",
"count",
"/",
"len",
"(",
"self",
")",
",",
"\"Analyzing: {}\"",
".",
"format",
"(",
"R",
")",
")",
"if",
"edgeAttribute",
"is",
"not",
"None",
":",
"edgeVals",
"=",
"R",
".",
"get",
"(",
"edgeAttribute",
",",
"[",
"]",
")",
"if",
"not",
"isinstance",
"(",
"edgeVals",
",",
"list",
")",
":",
"edgeVals",
"=",
"[",
"edgeVals",
"]",
"contents1",
"=",
"R",
".",
"get",
"(",
"tag1",
")",
"contents2",
"=",
"R",
".",
"get",
"(",
"tag2",
")",
"if",
"isinstance",
"(",
"contents1",
",",
"list",
")",
":",
"contents1",
"=",
"[",
"stemmerTag1",
"(",
"str",
"(",
"v",
")",
")",
"for",
"v",
"in",
"contents1",
"]",
"elif",
"contents1",
"==",
"None",
":",
"contents1",
"=",
"[",
"]",
"else",
":",
"contents1",
"=",
"[",
"stemmerTag1",
"(",
"str",
"(",
"contents1",
")",
")",
"]",
"if",
"isinstance",
"(",
"contents2",
",",
"list",
")",
":",
"contents2",
"=",
"[",
"stemmerTag2",
"(",
"str",
"(",
"v",
")",
")",
"for",
"v",
"in",
"contents2",
"]",
"elif",
"contents2",
"==",
"None",
":",
"contents2",
"=",
"[",
"]",
"else",
":",
"contents2",
"=",
"[",
"stemmerTag2",
"(",
"str",
"(",
"contents2",
")",
")",
"]",
"for",
"node1",
"in",
"contents1",
":",
"for",
"node2",
"in",
"contents2",
":",
"if",
"edgeAttribute",
":",
"for",
"edgeVal",
"in",
"edgeVals",
":",
"if",
"grph",
".",
"has_edge",
"(",
"node1",
",",
"node2",
",",
"key",
"=",
"edgeVal",
")",
":",
"if",
"edgeWeight",
":",
"grph",
".",
"edges",
"[",
"node1",
",",
"node2",
",",
"edgeVal",
"]",
"[",
"'weight'",
"]",
"+=",
"1",
"else",
":",
"if",
"edgeWeight",
":",
"attrDict",
"=",
"{",
"'key'",
":",
"edgeVal",
",",
"'weight'",
":",
"1",
"}",
"else",
":",
"attrDict",
"=",
"{",
"'key'",
":",
"edgeVal",
"}",
"grph",
".",
"add_edge",
"(",
"node1",
",",
"node2",
",",
"*",
"*",
"attrDict",
")",
"elif",
"edgeWeight",
":",
"try",
":",
"grph",
".",
"edges",
"[",
"node1",
",",
"node2",
"]",
"[",
"'weight'",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"grph",
".",
"add_edge",
"(",
"node1",
",",
"node2",
",",
"weight",
"=",
"1",
")",
"else",
":",
"if",
"not",
"grph",
".",
"has_edge",
"(",
"node1",
",",
"node2",
")",
":",
"grph",
".",
"add_edge",
"(",
"node1",
",",
"node2",
")",
"if",
"nodeCount",
":",
"try",
":",
"grph",
".",
"node",
"[",
"node1",
"]",
"[",
"'count'",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"try",
":",
"grph",
".",
"node",
"[",
"node1",
"]",
"[",
"'count'",
"]",
"=",
"1",
"if",
"recordType",
":",
"grph",
".",
"node",
"[",
"node1",
"]",
"[",
"'type'",
"]",
"=",
"tag1",
"except",
"KeyError",
":",
"if",
"recordType",
":",
"grph",
".",
"add_node",
"(",
"node1",
",",
"type",
"=",
"tag1",
")",
"else",
":",
"grph",
".",
"add_node",
"(",
"node1",
")",
"else",
":",
"if",
"not",
"grph",
".",
"has_node",
"(",
"node1",
")",
":",
"if",
"recordType",
":",
"grph",
".",
"add_node",
"(",
"node1",
",",
"type",
"=",
"tag1",
")",
"else",
":",
"grph",
".",
"add_node",
"(",
"node1",
")",
"elif",
"recordType",
":",
"if",
"'type'",
"not",
"in",
"grph",
".",
"node",
"[",
"node1",
"]",
":",
"grph",
".",
"node",
"[",
"node1",
"]",
"[",
"'type'",
"]",
"=",
"tag1",
"for",
"node2",
"in",
"contents2",
":",
"if",
"nodeCount",
":",
"try",
":",
"grph",
".",
"node",
"[",
"node2",
"]",
"[",
"'count'",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"try",
":",
"grph",
".",
"node",
"[",
"node2",
"]",
"[",
"'count'",
"]",
"=",
"1",
"if",
"recordType",
":",
"grph",
".",
"node",
"[",
"node2",
"]",
"[",
"'type'",
"]",
"=",
"tag2",
"except",
"KeyError",
":",
"grph",
".",
"add_node",
"(",
"node2",
",",
"count",
"=",
"1",
")",
"if",
"recordType",
":",
"grph",
".",
"node",
"[",
"node2",
"]",
"[",
"'type'",
"]",
"=",
"tag2",
"else",
":",
"if",
"not",
"grph",
".",
"has_node",
"(",
"node2",
")",
":",
"if",
"recordType",
":",
"grph",
".",
"add_node",
"(",
"node2",
",",
"type",
"=",
"tag2",
")",
"else",
":",
"grph",
".",
"add_node",
"(",
"node2",
")",
"elif",
"recordType",
":",
"if",
"'type'",
"not",
"in",
"grph",
".",
"node",
"[",
"node2",
"]",
":",
"grph",
".",
"node",
"[",
"node2",
"]",
"[",
"'type'",
"]",
"=",
"tag2",
"if",
"PBar",
":",
"PBar",
".",
"finish",
"(",
"\"Done making a two mode network of \"",
"+",
"tag1",
"+",
"\" and \"",
"+",
"tag2",
")",
"return",
"grph"
] | 50.312139 | 27.583815 |
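
A stand-alone sketch of the two-mode construction with plain dicts in place of Records, mirroring the docstring's subject ('WC') vs. language ('LA') example; the field values are made up.

```python
import networkx as nx

records = [
    {"WC": ["Physics", "Optics"], "LA": ["English"]},
    {"WC": ["Optics"], "LA": ["English", "French"]},
]
g = nx.Graph()
for rec in records:
    for a in rec["WC"]:
        g.add_node(a, type="WC")
        for b in rec["LA"]:
            g.add_node(b, type="LA")
            # Weight counts co-occurrences, as with edgeWeight=True above.
            w = g.edges[a, b]["weight"] + 1 if g.has_edge(a, b) else 1
            g.add_edge(a, b, weight=w)

for u, v, w in g.edges(data="weight"):
    print(u, v, w)   # e.g. Optics English 2
```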
def add_watch_callback(self, *args, **kwargs):
"""
Watch a key or range of keys and call a callback on every event.
        If a timeout was declared during client initialization and
        the watch cannot be created within that time, the method raises
        a ``WatchTimedOut`` exception.
:param key: key to watch
:param callback: callback function
:returns: watch_id. Later it could be used for cancelling watch.
"""
try:
return self.watcher.add_callback(*args, **kwargs)
except queue.Empty:
raise exceptions.WatchTimedOut()
|
[
"def",
"add_watch_callback",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"self",
".",
"watcher",
".",
"add_callback",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"queue",
".",
"Empty",
":",
"raise",
"exceptions",
".",
"WatchTimedOut",
"(",
")"
] | 35.647059 | 19.176471 |
def google_text_emphasis(style):
"""return a list of all emphasis modifiers of the element"""
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis
|
[
"def",
"google_text_emphasis",
"(",
"style",
")",
":",
"emphasis",
"=",
"[",
"]",
"if",
"'text-decoration'",
"in",
"style",
":",
"emphasis",
".",
"append",
"(",
"style",
"[",
"'text-decoration'",
"]",
")",
"if",
"'font-style'",
"in",
"style",
":",
"emphasis",
".",
"append",
"(",
"style",
"[",
"'font-style'",
"]",
")",
"if",
"'font-weight'",
"in",
"style",
":",
"emphasis",
".",
"append",
"(",
"style",
"[",
"'font-weight'",
"]",
")",
"return",
"emphasis"
] | 36.3 | 9.7 |
def onchange_dates(self):
'''
        This method gives the duration between check-in and checkout.
        If the customer leaves after only a few hours, the stay is still
        considered a whole day. If the stay runs over by at least the
        number of additional hours configured on the company, it is
        counted as an extra full day.
--------------------------------------------------------------------
@param self: object pointer
@return: Duration and checkout_date
'''
configured_addition_hours = 0
wid = self.warehouse_id
whouse_com_id = wid or wid.company_id
if whouse_com_id:
configured_addition_hours = wid.company_id.additional_hours
myduration = 0
chckin = self.checkin_date
chckout = self.checkout_date
if chckin and chckout:
server_dt = DEFAULT_SERVER_DATETIME_FORMAT
chkin_dt = datetime.datetime.strptime(chckin, server_dt)
chkout_dt = datetime.datetime.strptime(chckout, server_dt)
dur = chkout_dt - chkin_dt
sec_dur = dur.seconds
if (not dur.days and not sec_dur) or (dur.days and not sec_dur):
myduration = dur.days
else:
myduration = dur.days + 1
# To calculate additional hours in hotel room as per minutes
if configured_addition_hours > 0:
additional_hours = abs((dur.seconds / 60) / 60)
if additional_hours >= configured_addition_hours:
myduration += 1
self.duration = myduration
self.duration_dummy = self.duration
|
[
"def",
"onchange_dates",
"(",
"self",
")",
":",
"configured_addition_hours",
"=",
"0",
"wid",
"=",
"self",
".",
"warehouse_id",
"whouse_com_id",
"=",
"wid",
"or",
"wid",
".",
"company_id",
"if",
"whouse_com_id",
":",
"configured_addition_hours",
"=",
"wid",
".",
"company_id",
".",
"additional_hours",
"myduration",
"=",
"0",
"chckin",
"=",
"self",
".",
"checkin_date",
"chckout",
"=",
"self",
".",
"checkout_date",
"if",
"chckin",
"and",
"chckout",
":",
"server_dt",
"=",
"DEFAULT_SERVER_DATETIME_FORMAT",
"chkin_dt",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"chckin",
",",
"server_dt",
")",
"chkout_dt",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"chckout",
",",
"server_dt",
")",
"dur",
"=",
"chkout_dt",
"-",
"chkin_dt",
"sec_dur",
"=",
"dur",
".",
"seconds",
"if",
"(",
"not",
"dur",
".",
"days",
"and",
"not",
"sec_dur",
")",
"or",
"(",
"dur",
".",
"days",
"and",
"not",
"sec_dur",
")",
":",
"myduration",
"=",
"dur",
".",
"days",
"else",
":",
"myduration",
"=",
"dur",
".",
"days",
"+",
"1",
"# To calculate additional hours in hotel room as per minutes",
"if",
"configured_addition_hours",
">",
"0",
":",
"additional_hours",
"=",
"abs",
"(",
"(",
"dur",
".",
"seconds",
"/",
"60",
")",
"/",
"60",
")",
"if",
"additional_hours",
">=",
"configured_addition_hours",
":",
"myduration",
"+=",
"1",
"self",
".",
"duration",
"=",
"myduration",
"self",
".",
"duration_dummy",
"=",
"self",
".",
"duration"
] | 45.694444 | 16.638889 |
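
The rounding rule as a stand-alone helper (the names and the 2-hour default are illustrative): any leftover seconds round the stay up to the next day, and leftover time at or past the configured additional-hours threshold adds one more, mirroring the record's logic.

```python
import datetime

def stay_duration(checkin, checkout, additional_hours=2):
    fmt = "%Y-%m-%d %H:%M:%S"
    dur = (datetime.datetime.strptime(checkout, fmt)
           - datetime.datetime.strptime(checkin, fmt))
    days = dur.days if dur.seconds == 0 else dur.days + 1
    if additional_hours > 0 and (dur.seconds / 60) / 60 >= additional_hours:
        days += 1                      # mirrors the record's extra increment
    return days

print(stay_duration("2021-01-01 12:00:00", "2021-01-02 12:00:00"))  # 1
print(stay_duration("2021-01-01 12:00:00", "2021-01-02 15:00:00"))  # 3
```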
def login(remote_app):
"""Send user to remote application for authentication."""
oauth = current_app.extensions['oauthlib.client']
if remote_app not in oauth.remote_apps:
return abort(404)
# Get redirect target in safe manner.
next_param = get_safe_redirect_target(arg='next')
# Redirect URI - must be registered in the remote service.
callback_url = url_for(
'.authorized',
remote_app=remote_app,
_external=True,
)
# Create a JSON Web Token that expires after OAUTHCLIENT_STATE_EXPIRES
# seconds.
state_token = serializer.dumps({
'app': remote_app,
'next': next_param,
'sid': _create_identifier(),
})
return oauth.remote_apps[remote_app].authorize(
callback=callback_url,
state=state_token,
)
|
[
"def",
"login",
"(",
"remote_app",
")",
":",
"oauth",
"=",
"current_app",
".",
"extensions",
"[",
"'oauthlib.client'",
"]",
"if",
"remote_app",
"not",
"in",
"oauth",
".",
"remote_apps",
":",
"return",
"abort",
"(",
"404",
")",
"# Get redirect target in safe manner.",
"next_param",
"=",
"get_safe_redirect_target",
"(",
"arg",
"=",
"'next'",
")",
"# Redirect URI - must be registered in the remote service.",
"callback_url",
"=",
"url_for",
"(",
"'.authorized'",
",",
"remote_app",
"=",
"remote_app",
",",
"_external",
"=",
"True",
",",
")",
"# Create a JSON Web Token that expires after OAUTHCLIENT_STATE_EXPIRES",
"# seconds.",
"state_token",
"=",
"serializer",
".",
"dumps",
"(",
"{",
"'app'",
":",
"remote_app",
",",
"'next'",
":",
"next_param",
",",
"'sid'",
":",
"_create_identifier",
"(",
")",
",",
"}",
")",
"return",
"oauth",
".",
"remote_apps",
"[",
"remote_app",
"]",
".",
"authorize",
"(",
"callback",
"=",
"callback_url",
",",
"state",
"=",
"state_token",
",",
")"
] | 27.517241 | 19.896552 |
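The state-token round trip can be reproduced with `itsdangerous`, which is one plausible implementation of the `serializer` used above; the serializer class, secret, and payload values here are assumptions for illustration.

```python
from itsdangerous import BadSignature, URLSafeTimedSerializer

serializer = URLSafeTimedSerializer("not-the-real-secret")  # assumed secret key

# What login() packs into the `state` query parameter.
state = serializer.dumps({"app": "github", "next": "/deposit", "sid": "abc123"})

# What the authorized() callback would do: reject tampered or expired tokens.
try:
    payload = serializer.loads(state, max_age=300)  # cf. OAUTHCLIENT_STATE_EXPIRES
except BadSignature:
    payload = None
print(payload)
```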
def total(self):
"""Return the total number of records"""
if self._result_cache:
return self._result_cache.total
return self.all().total
|
[
"def",
"total",
"(",
"self",
")",
":",
"if",
"self",
".",
"_result_cache",
":",
"return",
"self",
".",
"_result_cache",
".",
"total",
"return",
"self",
".",
"all",
"(",
")",
".",
"total"
] | 28 | 14.333333 |
def account_block(self, id):
"""
Block a user.
Returns a `relationship dict`_ containing the updated relationship to the user.
"""
id = self.__unpack_id(id)
url = '/api/v1/accounts/{0}/block'.format(str(id))
return self.__api_request('POST', url)
|
[
"def",
"account_block",
"(",
"self",
",",
"id",
")",
":",
"id",
"=",
"self",
".",
"__unpack_id",
"(",
"id",
")",
"url",
"=",
"'/api/v1/accounts/{0}/block'",
".",
"format",
"(",
"str",
"(",
"id",
")",
")",
"return",
"self",
".",
"__api_request",
"(",
"'POST'",
",",
"url",
")"
] | 32.777778 | 16.555556 |
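Assuming this method belongs to the Mastodon.py client, a typical call looks like the sketch below; the base URL, token, and account id are placeholders.

```python
from mastodon import Mastodon

api = Mastodon(
    access_token="placeholder-token",
    api_base_url="https://mastodon.example",
)
relationship = api.account_block(12345)  # plain numeric ids are accepted
print(relationship["blocking"])          # True once the block is in place
```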
def write_files(dos, pdos, prefix=None, directory=None, zero_to_efermi=True):
"""Write the density of states data to disk.
Args:
dos (:obj:`~pymatgen.electronic_structure.dos.Dos` or \
:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The total
density of states.
pdos (dict): The projected density of states. Formatted as a
:obj:`dict` of :obj:`dict` mapping the elements and their orbitals
to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For
example::
{
'Bi': {'s': Dos, 'p': Dos},
'S': {'s': Dos}
}
prefix (:obj:`str`, optional): A prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
zero_to_efermi (:obj:`bool`, optional): Normalise the energy such
that the Fermi level is set as 0 eV.
"""
# defining these cryptic lists makes formatting the data much easier later
if len(dos.densities) == 1:
sdata = [[Spin.up, 1, '']]
else:
sdata = [[Spin.up, 1, '(up)'], [Spin.down, -1, '(down)']]
header = ['energy']
eners = dos.energies - dos.efermi if zero_to_efermi else dos.energies
tdos_data = [eners]
for spin, sign, label in sdata:
header.append('dos{}'.format(label))
tdos_data.append(dos.densities[spin] * sign)
tdos_data = np.stack(tdos_data, axis=1)
filename = "{}_total_dos.dat".format(prefix) if prefix else 'total_dos.dat'
if directory:
filename = os.path.join(directory, filename)
np.savetxt(filename, tdos_data, header=" ".join(header))
spin = len(dos.densities)
for el, el_pdos in pdos.items():
header = ['energy']
pdos_data = [eners]
for orb in sort_orbitals(el_pdos):
for spin, sign, label in sdata:
header.append('{}{}'.format(orb, label))
pdos_data.append(el_pdos[orb].densities[spin] * sign)
pdos_data = np.stack(pdos_data, axis=1)
if prefix:
filename = '{}_{}_dos.dat'.format(prefix, el)
else:
filename = '{}_dos.dat'.format(el)
if directory:
filename = os.path.join(directory, filename)
np.savetxt(filename, pdos_data, header=" ".join(header))
|
[
"def",
"write_files",
"(",
"dos",
",",
"pdos",
",",
"prefix",
"=",
"None",
",",
"directory",
"=",
"None",
",",
"zero_to_efermi",
"=",
"True",
")",
":",
"# defining these cryptic lists makes formatting the data much easier later",
"if",
"len",
"(",
"dos",
".",
"densities",
")",
"==",
"1",
":",
"sdata",
"=",
"[",
"[",
"Spin",
".",
"up",
",",
"1",
",",
"''",
"]",
"]",
"else",
":",
"sdata",
"=",
"[",
"[",
"Spin",
".",
"up",
",",
"1",
",",
"'(up)'",
"]",
",",
"[",
"Spin",
".",
"down",
",",
"-",
"1",
",",
"'(down)'",
"]",
"]",
"header",
"=",
"[",
"'energy'",
"]",
"eners",
"=",
"dos",
".",
"energies",
"-",
"dos",
".",
"efermi",
"if",
"zero_to_efermi",
"else",
"dos",
".",
"energies",
"tdos_data",
"=",
"[",
"eners",
"]",
"for",
"spin",
",",
"sign",
",",
"label",
"in",
"sdata",
":",
"header",
".",
"append",
"(",
"'dos{}'",
".",
"format",
"(",
"label",
")",
")",
"tdos_data",
".",
"append",
"(",
"dos",
".",
"densities",
"[",
"spin",
"]",
"*",
"sign",
")",
"tdos_data",
"=",
"np",
".",
"stack",
"(",
"tdos_data",
",",
"axis",
"=",
"1",
")",
"filename",
"=",
"\"{}_total_dos.dat\"",
".",
"format",
"(",
"prefix",
")",
"if",
"prefix",
"else",
"'total_dos.dat'",
"if",
"directory",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"np",
".",
"savetxt",
"(",
"filename",
",",
"tdos_data",
",",
"header",
"=",
"\" \"",
".",
"join",
"(",
"header",
")",
")",
"spin",
"=",
"len",
"(",
"dos",
".",
"densities",
")",
"for",
"el",
",",
"el_pdos",
"in",
"pdos",
".",
"items",
"(",
")",
":",
"header",
"=",
"[",
"'energy'",
"]",
"pdos_data",
"=",
"[",
"eners",
"]",
"for",
"orb",
"in",
"sort_orbitals",
"(",
"el_pdos",
")",
":",
"for",
"spin",
",",
"sign",
",",
"label",
"in",
"sdata",
":",
"header",
".",
"append",
"(",
"'{}{}'",
".",
"format",
"(",
"orb",
",",
"label",
")",
")",
"pdos_data",
".",
"append",
"(",
"el_pdos",
"[",
"orb",
"]",
".",
"densities",
"[",
"spin",
"]",
"*",
"sign",
")",
"pdos_data",
"=",
"np",
".",
"stack",
"(",
"pdos_data",
",",
"axis",
"=",
"1",
")",
"if",
"prefix",
":",
"filename",
"=",
"'{}_{}_dos.dat'",
".",
"format",
"(",
"prefix",
",",
"el",
")",
"else",
":",
"filename",
"=",
"'{}_dos.dat'",
".",
"format",
"(",
"el",
")",
"if",
"directory",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"np",
".",
"savetxt",
"(",
"filename",
",",
"pdos_data",
",",
"header",
"=",
"\" \"",
".",
"join",
"(",
"header",
")",
")"
] | 39.672414 | 21.068966 |
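The file-writing pattern at the heart of this function, an energy column stacked against one signed density column per spin channel and saved with a space-joined header, works in isolation. The numbers below are synthetic.

```python
import numpy as np

energies = np.linspace(-5.0, 5.0, 11)
dos_up = np.random.rand(11)
dos_down = np.random.rand(11)

header = ["energy", "dos(up)", "dos(down)"]
data = np.stack([energies, dos_up, -dos_down], axis=1)  # spin-down made negative
np.savetxt("example_total_dos.dat", data, header=" ".join(header))
```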
def d2attrs(*args, **kwargs):
"""Utility function to remove ``**kwargs`` parsing boiler-plate in
``__init__``:
>>> kwargs = dict(name='Bill', age=51, income=1e7)
>>> self = ezstruct(); d2attrs(kwargs, self, 'income', 'name'); self
ezstruct(income=10000000.0, name='Bill')
>>> self = ezstruct(); d2attrs(kwargs, self, 'income', age=0, bloodType='A'); self
ezstruct(age=51, bloodType='A', income=10000000.0)
To set all keys from ``kwargs`` use:
>>> self = ezstruct(); d2attrs(kwargs, self, 'all!'); self
ezstruct(age=51, income=10000000.0, name='Bill')
"""
(d, self), args = args[:2], args[2:]
if args[0] == 'all!':
assert len(args) == 1
for k in d: setattr(self, k, d[k])
else:
if len(args) != len(set(args)) or set(kwargs) & set(args):
            raise ValueError('Duplicate keys: %s' %
                             (list(notUnique(args)) + list(set(kwargs) & set(args))))
for k in args:
if k in kwargs: raise ValueError('%s specified twice' % k)
setattr(self, k, d[k])
for dk in kwargs:
setattr(self, dk, d.get(dk, kwargs[dk]))
|
[
"def",
"d2attrs",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"(",
"d",
",",
"self",
")",
",",
"args",
"=",
"args",
"[",
":",
"2",
"]",
",",
"args",
"[",
"2",
":",
"]",
"if",
"args",
"[",
"0",
"]",
"==",
"'all!'",
":",
"assert",
"len",
"(",
"args",
")",
"==",
"1",
"for",
"k",
"in",
"d",
":",
"setattr",
"(",
"self",
",",
"k",
",",
"d",
"[",
"k",
"]",
")",
"else",
":",
"if",
"len",
"(",
"args",
")",
"!=",
"len",
"(",
"set",
"(",
"args",
")",
")",
"or",
"set",
"(",
"kwargs",
")",
"&",
"set",
"(",
"args",
")",
":",
"raise",
"ValueError",
"(",
"'Duplicate keys: %s'",
"%",
"list",
"(",
"notUnique",
"(",
"args",
")",
")",
"+",
"list",
"(",
"set",
"(",
"kwargs",
")",
"&",
"set",
"(",
"args",
")",
")",
")",
"for",
"k",
"in",
"args",
":",
"if",
"k",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'%s specified twice'",
"%",
"k",
")",
"setattr",
"(",
"self",
",",
"k",
",",
"d",
"[",
"k",
"]",
")",
"for",
"dk",
"in",
"kwargs",
":",
"setattr",
"(",
"self",
",",
"dk",
",",
"d",
".",
"get",
"(",
"dk",
",",
"kwargs",
"[",
"dk",
"]",
")",
")"
] | 41.785714 | 19.535714 |
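Beyond the doctests, the function works on any ordinary object; in this made-up example, `country` exercises the keyword-with-default path.

```python
class Record(object):
    pass

kwargs = {"name": "Ada", "age": 36, "income": 1e5}
rec = Record()
d2attrs(kwargs, rec, "name", "age", country="UK")  # explicit keys plus a default
print(rec.name, rec.age, rec.country)              # -> Ada 36 UK
```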
def _validate_params(self):
"""
method to sanitize model parameters
Parameters
---------
None
Returns
-------
None
"""
self.distribution = GammaDist(scale=self.scale)
super(GammaGAM, self)._validate_params()
|
[
"def",
"_validate_params",
"(",
"self",
")",
":",
"self",
".",
"distribution",
"=",
"GammaDist",
"(",
"scale",
"=",
"self",
".",
"scale",
")",
"super",
"(",
"GammaGAM",
",",
"self",
")",
".",
"_validate_params",
"(",
")"
] | 20.285714 | 19.285714 |
def find_train_knns(self, data_activations):
"""
Given a data_activation dictionary that contains a np array with activations for each layer,
find the knns in the training data.
"""
knns_ind = {}
knns_labels = {}
for layer in self.layers:
# Pre-process representations of data to normalize and remove training data mean.
data_activations_layer = copy.copy(data_activations[layer])
nb_data = data_activations_layer.shape[0]
data_activations_layer /= np.linalg.norm(
data_activations_layer, axis=1).reshape(-1, 1)
data_activations_layer -= self.centers[layer]
# Use FALCONN to find indices of nearest neighbors in training data.
knns_ind[layer] = np.zeros(
(data_activations_layer.shape[0], self.neighbors), dtype=np.int32)
knn_errors = 0
for i in range(data_activations_layer.shape[0]):
query_res = self.query_objects[layer].find_k_nearest_neighbors(
data_activations_layer[i], self.neighbors)
try:
knns_ind[layer][i, :] = query_res
except: # pylint: disable-msg=W0702
knns_ind[layer][i, :len(query_res)] = query_res
knn_errors += knns_ind[layer].shape[1] - len(query_res)
# Find labels of neighbors found in the training data.
knns_labels[layer] = np.zeros((nb_data, self.neighbors), dtype=np.int32)
for data_id in range(nb_data):
knns_labels[layer][data_id, :] = self.train_labels[knns_ind[layer][data_id]]
return knns_ind, knns_labels
|
[
"def",
"find_train_knns",
"(",
"self",
",",
"data_activations",
")",
":",
"knns_ind",
"=",
"{",
"}",
"knns_labels",
"=",
"{",
"}",
"for",
"layer",
"in",
"self",
".",
"layers",
":",
"# Pre-process representations of data to normalize and remove training data mean.",
"data_activations_layer",
"=",
"copy",
".",
"copy",
"(",
"data_activations",
"[",
"layer",
"]",
")",
"nb_data",
"=",
"data_activations_layer",
".",
"shape",
"[",
"0",
"]",
"data_activations_layer",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"data_activations_layer",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"data_activations_layer",
"-=",
"self",
".",
"centers",
"[",
"layer",
"]",
"# Use FALCONN to find indices of nearest neighbors in training data.",
"knns_ind",
"[",
"layer",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"data_activations_layer",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"neighbors",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"knn_errors",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"data_activations_layer",
".",
"shape",
"[",
"0",
"]",
")",
":",
"query_res",
"=",
"self",
".",
"query_objects",
"[",
"layer",
"]",
".",
"find_k_nearest_neighbors",
"(",
"data_activations_layer",
"[",
"i",
"]",
",",
"self",
".",
"neighbors",
")",
"try",
":",
"knns_ind",
"[",
"layer",
"]",
"[",
"i",
",",
":",
"]",
"=",
"query_res",
"except",
":",
"# pylint: disable-msg=W0702",
"knns_ind",
"[",
"layer",
"]",
"[",
"i",
",",
":",
"len",
"(",
"query_res",
")",
"]",
"=",
"query_res",
"knn_errors",
"+=",
"knns_ind",
"[",
"layer",
"]",
".",
"shape",
"[",
"1",
"]",
"-",
"len",
"(",
"query_res",
")",
"# Find labels of neighbors found in the training data.",
"knns_labels",
"[",
"layer",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"nb_data",
",",
"self",
".",
"neighbors",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"data_id",
"in",
"range",
"(",
"nb_data",
")",
":",
"knns_labels",
"[",
"layer",
"]",
"[",
"data_id",
",",
":",
"]",
"=",
"self",
".",
"train_labels",
"[",
"knns_ind",
"[",
"layer",
"]",
"[",
"data_id",
"]",
"]",
"return",
"knns_ind",
",",
"knns_labels"
] | 43 | 21 |
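The pre-processing and lookup can be sketched with scikit-learn's exact `NearestNeighbors` standing in for FALCONN's LSH index; shapes and values below are synthetic.

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

train = np.random.rand(100, 32).astype(np.float32)
train /= np.linalg.norm(train, axis=1, keepdims=True)  # L2-normalise rows
center = train.mean(axis=0)
index = NearestNeighbors(n_neighbors=5).fit(train - center)  # remove mean

queries = np.random.rand(3, 32).astype(np.float32)
queries /= np.linalg.norm(queries, axis=1, keepdims=True)
_, knn_indices = index.kneighbors(queries - center)
print(knn_indices.shape)  # (3, 5): five training neighbours per query
```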
def get_images(output_directory, explicit, input_path, config, parsed_article):
"""
Main logic controller for the placement of images into the output directory
Controlling logic for placement of the appropriate imager files into the
EPUB directory. This function interacts with interface arguments as well as
the local installation config.py file. These may change behavior of this
function in terms of how it looks for images relative to the input, where it
finds explicit images, whether it will attempt to download images, and
whether successfully downloaded images will be stored in the cache.
Parameters
----------
output_directory : str
The directory path where the EPUB is being constructed/output
explicit : str
A directory path to a user specified directory of images. Allows *
wildcard expansion.
input_path : str
The absolute path to the input XML file.
config : config module
The imported configuration module
parsed_article : openaccess_epub.article.Article object
The Article instance for the article being converted to EPUB
"""
#Split the DOI
journal_doi, article_doi = parsed_article.doi.split('/')
log.debug('journal-doi : {0}'.format(journal_doi))
log.debug('article-doi : {0}'.format(article_doi))
#Get the rootname for wildcard expansion
rootname = utils.file_root_name(input_path)
#Specify where to place the images in the output
img_dir = os.path.join(output_directory,
'EPUB',
'images-{0}'.format(article_doi))
log.info('Using {0} as image directory target'.format(img_dir))
#Construct path to cache for article
article_cache = os.path.join(config.image_cache, journal_doi, article_doi)
#Use manual image directory, explicit images
if explicit:
success = explicit_images(explicit, img_dir, rootname, config)
if success and config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
#Explicit images prevents all other image methods
return success
#Input-Relative import, looks for any one of the listed options
if config.use_input_relative_images:
#Prevents other image methods only if successful
if input_relative_images(input_path, img_dir, rootname, config):
if config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return True
#Use cache for article if it exists
if config.use_image_cache:
#Prevents other image methods only if successful
if image_cache(article_cache, img_dir):
return True
#Download images from Internet
if config.use_image_fetching:
os.mkdir(img_dir)
if journal_doi == '10.3389':
fetch_frontiers_images(article_doi, img_dir)
if config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return True
elif journal_doi == '10.1371':
success = fetch_plos_images(article_doi, img_dir, parsed_article)
if success and config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return success
else:
log.error('Fetching images for this publisher is not supported!')
return False
return False
|
[
"def",
"get_images",
"(",
"output_directory",
",",
"explicit",
",",
"input_path",
",",
"config",
",",
"parsed_article",
")",
":",
"#Split the DOI",
"journal_doi",
",",
"article_doi",
"=",
"parsed_article",
".",
"doi",
".",
"split",
"(",
"'/'",
")",
"log",
".",
"debug",
"(",
"'journal-doi : {0}'",
".",
"format",
"(",
"journal_doi",
")",
")",
"log",
".",
"debug",
"(",
"'article-doi : {0}'",
".",
"format",
"(",
"article_doi",
")",
")",
"#Get the rootname for wildcard expansion",
"rootname",
"=",
"utils",
".",
"file_root_name",
"(",
"input_path",
")",
"#Specify where to place the images in the output",
"img_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"'EPUB'",
",",
"'images-{0}'",
".",
"format",
"(",
"article_doi",
")",
")",
"log",
".",
"info",
"(",
"'Using {0} as image directory target'",
".",
"format",
"(",
"img_dir",
")",
")",
"#Construct path to cache for article",
"article_cache",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"image_cache",
",",
"journal_doi",
",",
"article_doi",
")",
"#Use manual image directory, explicit images",
"if",
"explicit",
":",
"success",
"=",
"explicit_images",
"(",
"explicit",
",",
"img_dir",
",",
"rootname",
",",
"config",
")",
"if",
"success",
"and",
"config",
".",
"use_image_cache",
":",
"move_images_to_cache",
"(",
"img_dir",
",",
"article_cache",
")",
"#Explicit images prevents all other image methods",
"return",
"success",
"#Input-Relative import, looks for any one of the listed options",
"if",
"config",
".",
"use_input_relative_images",
":",
"#Prevents other image methods only if successful",
"if",
"input_relative_images",
"(",
"input_path",
",",
"img_dir",
",",
"rootname",
",",
"config",
")",
":",
"if",
"config",
".",
"use_image_cache",
":",
"move_images_to_cache",
"(",
"img_dir",
",",
"article_cache",
")",
"return",
"True",
"#Use cache for article if it exists",
"if",
"config",
".",
"use_image_cache",
":",
"#Prevents other image methods only if successful",
"if",
"image_cache",
"(",
"article_cache",
",",
"img_dir",
")",
":",
"return",
"True",
"#Download images from Internet",
"if",
"config",
".",
"use_image_fetching",
":",
"os",
".",
"mkdir",
"(",
"img_dir",
")",
"if",
"journal_doi",
"==",
"'10.3389'",
":",
"fetch_frontiers_images",
"(",
"article_doi",
",",
"img_dir",
")",
"if",
"config",
".",
"use_image_cache",
":",
"move_images_to_cache",
"(",
"img_dir",
",",
"article_cache",
")",
"return",
"True",
"elif",
"journal_doi",
"==",
"'10.1371'",
":",
"success",
"=",
"fetch_plos_images",
"(",
"article_doi",
",",
"img_dir",
",",
"parsed_article",
")",
"if",
"success",
"and",
"config",
".",
"use_image_cache",
":",
"move_images_to_cache",
"(",
"img_dir",
",",
"article_cache",
")",
"return",
"success",
"else",
":",
"log",
".",
"error",
"(",
"'Fetching images for this publisher is not supported!'",
")",
"return",
"False",
"return",
"False"
] | 41.185185 | 20.419753 |
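Structurally this is a source cascade: try each image strategy in priority order and stop at the first success. A generic version of that control flow, with dummy strategies, is sketched below.

```python
def first_successful(*strategies):
    # Try each zero-argument strategy in order; stop at the first success.
    for strategy in strategies:
        if strategy():
            return True
    return False

ok = first_successful(
    lambda: False,  # e.g. no explicit image directory
    lambda: False,  # e.g. no input-relative images
    lambda: True,   # e.g. cache hit
)
print(ok)  # True
```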
def read(self, addr, size):
'''Read access.
:param addr: i2c slave address
:type addr: char
:param size: size of transfer
:type size: int
:returns: data byte array
:rtype: array.array('B')
'''
self.set_addr(addr | 0x01)
self.set_size(size)
self.start()
while not self.is_ready:
pass
return self.get_data(size)
|
[
"def",
"read",
"(",
"self",
",",
"addr",
",",
"size",
")",
":",
"self",
".",
"set_addr",
"(",
"addr",
"|",
"0x01",
")",
"self",
".",
"set_size",
"(",
"size",
")",
"self",
".",
"start",
"(",
")",
"while",
"not",
"self",
".",
"is_ready",
":",
"pass",
"return",
"self",
".",
"get_data",
"(",
"size",
")"
] | 23.777778 | 16.222222 |
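The read sequence (address with the read bit set, transfer size, start, poll until ready, fetch) can be shown against a toy register interface; the device address and data are invented, and whether `addr` already carries the shifted 7-bit address is an assumption about the original API.

```python
class FakeI2C:
    # Toy stand-in for the register interface used above.
    is_ready = True
    def set_addr(self, addr): pass
    def set_size(self, size): pass
    def start(self): pass
    def get_data(self, size): return bytearray(b"\x01\x02")[:size]

dev = FakeI2C()
dev.set_addr(0xA0 | 0x01)  # 8-bit address with the read bit set
dev.set_size(2)
dev.start()
while not dev.is_ready:
    pass
print(dev.get_data(2))  # bytearray(b'\x01\x02')
```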
def read_lastmodfile(directory):
"""
Return the number of the final inversion result.
"""
filename = '{0}/exe/inv.lastmod'.format(directory)
# filename HAS to exist. Otherwise the inversion was not finished
if(not os.path.isfile(filename)):
return None
linestring = open(filename, 'r').readline().strip()
linestring = linestring.replace("\n", '')
linestring = linestring.replace(".mag", '')
linestring = linestring.replace("../inv/rho", '')
return linestring
|
[
"def",
"read_lastmodfile",
"(",
"directory",
")",
":",
"filename",
"=",
"'{0}/exe/inv.lastmod'",
".",
"format",
"(",
"directory",
")",
"# filename HAS to exist. Otherwise the inversion was not finished",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
")",
":",
"return",
"None",
"linestring",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"linestring",
"=",
"linestring",
".",
"replace",
"(",
"\"\\n\"",
",",
"''",
")",
"linestring",
"=",
"linestring",
".",
"replace",
"(",
"\".mag\"",
",",
"''",
")",
"linestring",
"=",
"linestring",
".",
"replace",
"(",
"\"../inv/rho\"",
",",
"''",
")",
"return",
"linestring"
] | 35.571429 | 13.285714 |
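Assuming `inv.lastmod` holds a path such as `../inv/rho042.mag`, the chained replacements boil the line down to the bare iteration number:

```python
line = "../inv/rho042.mag\n"
number = line.strip().replace("\n", "").replace(".mag", "").replace("../inv/rho", "")
print(number)  # -> 042
```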
def post_create_app(cls, app, **settings):
"""Automatically register and init the Flask Marshmallow extension.
Args:
app (flask.Flask): The application instance in which to initialize
Flask Marshmallow upon.
Kwargs:
settings (dict): The settings passed to this method from the
parent app.
Returns:
flask.Flask: The Flask application that was passed in.
"""
super(MarshmallowAwareApp, cls).post_create_app(app, **settings)
marsh.init_app(app)
return app
|
[
"def",
"post_create_app",
"(",
"cls",
",",
"app",
",",
"*",
"*",
"settings",
")",
":",
"super",
"(",
"MarshmallowAwareApp",
",",
"cls",
")",
".",
"post_create_app",
"(",
"app",
",",
"*",
"*",
"settings",
")",
"marsh",
".",
"init_app",
"(",
"app",
")",
"return",
"app"
] | 30.052632 | 23.947368 |
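The deferred-initialisation pattern this relies on is the standard Flask extension idiom; a minimal sketch with Flask-Marshmallow, where the app factory and names are illustrative:

```python
from flask import Flask
from flask_marshmallow import Marshmallow

marsh = Marshmallow()  # module-level extension object, as assumed above

def create_app():
    app = Flask(__name__)
    marsh.init_app(app)  # bind the extension to this app instance
    return app
```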
def _do_put(self, uri, **kwargs):
"""
        Convenient method for PUT requests
        Returns the HTTP response from a PUT request
"""
#TODO:
        # Add error handling. Checking for HTTP status here would be much more convenient than in each calling method
scaleioapi_put_headers = {'content-type':'application/json'}
print "_do_put()"
if kwargs:
for key, value in kwargs.iteritems():
#if key == 'headers':
# scaleio_post_headers = value
# print "Adding custom PUT headers"
if key == 'json':
payload = value
try:
self.logger.debug("do_put(): " + "{}".format(uri))
#self._session.headers.update({'Content-Type':'application/json'})
            response = self._session.put(uri, headers=scaleioapi_put_headers, verify=self._im_verify_ssl, data=json.dumps(payload))
self.logger.debug("_do_put() - Response: " + "{}".format(response.text))
if response.status_code == requests.codes.ok:
return response
else:
self.logger.error("_do_put() - HTTP response error: " + "{}".format(response.status_code))
raise RuntimeError("_do_put() - HTTP response error" + response.status_code)
except:
raise RuntimeError("_do_put() - Communication error with ScaleIO gateway")
return response
|
[
"def",
"_do_put",
"(",
"self",
",",
"uri",
",",
"*",
"*",
"kwargs",
")",
":",
"#TODO:",
"# Add error handling. Check for HTTP status here would be much more conveinent than in each calling method",
"scaleioapi_put_headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
"print",
"\"_do_put()\"",
"if",
"kwargs",
":",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"#if key == 'headers':",
"# scaleio_post_headers = value",
"# print \"Adding custom PUT headers\"",
"if",
"key",
"==",
"'json'",
":",
"payload",
"=",
"value",
"try",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"do_put(): \"",
"+",
"\"{}\"",
".",
"format",
"(",
"uri",
")",
")",
"#self._session.headers.update({'Content-Type':'application/json'})",
"response",
"=",
"self",
".",
"_session",
".",
"put",
"(",
"url",
",",
"headers",
"=",
"scaleioapi_put_headers",
",",
"verify_ssl",
"=",
"self",
".",
"_im_verify_ssl",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"_do_put() - Response: \"",
"+",
"\"{}\"",
".",
"format",
"(",
"response",
".",
"text",
")",
")",
"if",
"response",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"ok",
":",
"return",
"response",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"_do_put() - HTTP response error: \"",
"+",
"\"{}\"",
".",
"format",
"(",
"response",
".",
"status_code",
")",
")",
"raise",
"RuntimeError",
"(",
"\"_do_put() - HTTP response error\"",
"+",
"response",
".",
"status_code",
")",
"except",
":",
"raise",
"RuntimeError",
"(",
"\"_do_put() - Communication error with ScaleIO gateway\"",
")",
"return",
"response"
] | 48.433333 | 25.5 |
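A leaner variant of the same PUT flow, assuming `self._session` is a `requests.Session`, lets `requests` handle JSON encoding and status checking; the URL and payload are placeholders.

```python
import requests

def do_put(session, uri, payload, verify=True):
    response = session.put(
        uri,
        json=payload,  # requests serialises the body and sets Content-Type
        verify=verify,
    )
    response.raise_for_status()  # replaces the manual status-code branch
    return response

session = requests.Session()
# do_put(session, "https://gateway.example/api/instances", {"name": "vol1"})
```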
def convert_attrs_to_bool(obj: Any,
attrs: Iterable[str],
default: bool = None) -> None:
"""
Applies :func:`convert_to_bool` to the specified attributes of an object,
modifying it in place.
"""
for a in attrs:
setattr(obj, a, convert_to_bool(getattr(obj, a), default=default))
|
[
"def",
"convert_attrs_to_bool",
"(",
"obj",
":",
"Any",
",",
"attrs",
":",
"Iterable",
"[",
"str",
"]",
",",
"default",
":",
"bool",
"=",
"None",
")",
"->",
"None",
":",
"for",
"a",
"in",
"attrs",
":",
"setattr",
"(",
"obj",
",",
"a",
",",
"convert_to_bool",
"(",
"getattr",
"(",
"obj",
",",
"a",
")",
",",
"default",
"=",
"default",
")",
")"
] | 38.666667 | 14.888889 |
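A usage sketch; `convert_to_bool` is not shown in this document, so the stub below is a guessed stand-in rather than the library's implementation.

```python
def convert_to_bool(value, default=None):
    # Hypothetical stand-in for the real helper.
    if value is None:
        return default
    return str(value).strip().lower() in ("1", "true", "t", "yes", "y")

class Settings:
    def __init__(self):
        self.debug = "Yes"
        self.verbose = None

s = Settings()
convert_attrs_to_bool(s, ["debug", "verbose"], default=False)
print(s.debug, s.verbose)  # -> True False
```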
def get(self, user_id, client_id, type, fields=None, include_fields=True):
"""List device credentials.
Args:
user_id (str): The user_id of the devices to retrieve.
client_id (str): The client_id of the devices to retrieve.
type (str): The type of credentials (public_key, refresh_token).
fields (list, optional): A list of fields to include or exclude
(depending on include_fields) from the result, empty to
retrieve all fields
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise
(defaults to true)
See: https://auth0.com/docs/api/management/v2#!/Device_Credentials/get_device_credentials
"""
params = {
'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower(),
'user_id': user_id,
'client_id': client_id,
'type': type,
}
return self.client.get(self._url(), params=params)
|
[
"def",
"get",
"(",
"self",
",",
"user_id",
",",
"client_id",
",",
"type",
",",
"fields",
"=",
"None",
",",
"include_fields",
"=",
"True",
")",
":",
"params",
"=",
"{",
"'fields'",
":",
"fields",
"and",
"','",
".",
"join",
"(",
"fields",
")",
"or",
"None",
",",
"'include_fields'",
":",
"str",
"(",
"include_fields",
")",
".",
"lower",
"(",
")",
",",
"'user_id'",
":",
"user_id",
",",
"'client_id'",
":",
"client_id",
",",
"'type'",
":",
"type",
",",
"}",
"return",
"self",
".",
"client",
".",
"get",
"(",
"self",
".",
"_url",
"(",
")",
",",
"params",
"=",
"params",
")"
] | 36.233333 | 26.7 |
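The interesting part is how the query parameters are assembled; with hypothetical ids, the dict sent to the API looks like this:

```python
fields = ["id", "device_name"]
include_fields = True

params = {
    "fields": fields and ",".join(fields) or None,  # None when no fields given
    "include_fields": str(include_fields).lower(),  # "true"/"false" as strings
    "user_id": "auth0|abc123",                      # placeholder ids
    "client_id": "client-id",
    "type": "public_key",
}
print(params["fields"], params["include_fields"])  # id,device_name true
```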
def iter_insert_items(tree):
""" Iterate over the items to insert from an INSERT statement """
if tree.list_values:
keys = tree.attrs
for values in tree.list_values:
if len(keys) != len(values):
raise SyntaxError(
"Values '%s' do not match attributes " "'%s'" % (values, keys)
)
yield dict(zip(keys, map(resolve, values)))
elif tree.map_values:
for item in tree.map_values:
data = {}
for (key, val) in item:
data[key] = resolve(val)
yield data
else:
raise SyntaxError("No insert data found")
|
[
"def",
"iter_insert_items",
"(",
"tree",
")",
":",
"if",
"tree",
".",
"list_values",
":",
"keys",
"=",
"tree",
".",
"attrs",
"for",
"values",
"in",
"tree",
".",
"list_values",
":",
"if",
"len",
"(",
"keys",
")",
"!=",
"len",
"(",
"values",
")",
":",
"raise",
"SyntaxError",
"(",
"\"Values '%s' do not match attributes \"",
"\"'%s'\"",
"%",
"(",
"values",
",",
"keys",
")",
")",
"yield",
"dict",
"(",
"zip",
"(",
"keys",
",",
"map",
"(",
"resolve",
",",
"values",
")",
")",
")",
"elif",
"tree",
".",
"map_values",
":",
"for",
"item",
"in",
"tree",
".",
"map_values",
":",
"data",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"val",
")",
"in",
"item",
":",
"data",
"[",
"key",
"]",
"=",
"resolve",
"(",
"val",
")",
"yield",
"data",
"else",
":",
"raise",
"SyntaxError",
"(",
"\"No insert data found\"",
")"
] | 36.111111 | 12.833333 |
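Driving the generator only needs an object exposing `attrs`, `list_values`, and `map_values`; a `SimpleNamespace` serves as a stand-in parse tree, and `resolve` is assumed to map parsed literals to Python values (identity suffices here).

```python
from types import SimpleNamespace

resolve = lambda v: v  # assumed: turns parsed literals into Python values

tree = SimpleNamespace(
    attrs=["id", "name"],
    list_values=[[1, "alice"], [2, "bob"]],
    map_values=None,
)
print(list(iter_insert_items(tree)))
# -> [{'id': 1, 'name': 'alice'}, {'id': 2, 'name': 'bob'}]
```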
def license(self, key, value):
"""Populate the ``license`` key."""
def _get_license(value):
a_values = force_list(value.get('a'))
oa_licenses = [el for el in a_values if el == 'OA' or el == 'Open Access']
other_licenses = [el for el in a_values if el != 'OA' and el != 'Open Access']
if not other_licenses:
return force_single_element(oa_licenses)
return force_single_element(other_licenses)
def _get_material(value):
material = value.get('3', '').lower()
if material == 'article':
return 'publication'
return material
return {
'imposing': value.get('b'),
'license': _get_license(value),
'material': _get_material(value),
'url': value.get('u'),
}
|
[
"def",
"license",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"def",
"_get_license",
"(",
"value",
")",
":",
"a_values",
"=",
"force_list",
"(",
"value",
".",
"get",
"(",
"'a'",
")",
")",
"oa_licenses",
"=",
"[",
"el",
"for",
"el",
"in",
"a_values",
"if",
"el",
"==",
"'OA'",
"or",
"el",
"==",
"'Open Access'",
"]",
"other_licenses",
"=",
"[",
"el",
"for",
"el",
"in",
"a_values",
"if",
"el",
"!=",
"'OA'",
"and",
"el",
"!=",
"'Open Access'",
"]",
"if",
"not",
"other_licenses",
":",
"return",
"force_single_element",
"(",
"oa_licenses",
")",
"return",
"force_single_element",
"(",
"other_licenses",
")",
"def",
"_get_material",
"(",
"value",
")",
":",
"material",
"=",
"value",
".",
"get",
"(",
"'3'",
",",
"''",
")",
".",
"lower",
"(",
")",
"if",
"material",
"==",
"'article'",
":",
"return",
"'publication'",
"return",
"material",
"return",
"{",
"'imposing'",
":",
"value",
".",
"get",
"(",
"'b'",
")",
",",
"'license'",
":",
"_get_license",
"(",
"value",
")",
",",
"'material'",
":",
"_get_material",
"(",
"value",
")",
",",
"'url'",
":",
"value",
".",
"get",
"(",
"'u'",
")",
",",
"}"
] | 31.958333 | 18.166667 |
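Given a MARC-like field (the subfield values below are hypothetical), the handler prefers a concrete licence over a bare 'OA' marker and normalises 'article' to 'publication':

```python
value = {
    'a': ['OA', 'CC-BY-4.0'],
    'b': 'SCOAP3',
    '3': 'Article',
    'u': 'https://creativecommons.org/licenses/by/4.0/',
}
# Expected mapping produced for this field:
# {'imposing': 'SCOAP3', 'license': 'CC-BY-4.0',
#  'material': 'publication',
#  'url': 'https://creativecommons.org/licenses/by/4.0/'}
```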
def ecp_endpoint(self, ipaddress):
"""
Returns the entity ID of the IdP which the ECP client should talk to
:param ipaddress: The IP address of the user client
:return: IdP entity ID or None
"""
_ecp = self.getattr("ecp")
if _ecp:
for key, eid in _ecp.items():
if re.match(key, ipaddress):
return eid
return None
|
[
"def",
"ecp_endpoint",
"(",
"self",
",",
"ipaddress",
")",
":",
"_ecp",
"=",
"self",
".",
"getattr",
"(",
"\"ecp\"",
")",
"if",
"_ecp",
":",
"for",
"key",
",",
"eid",
"in",
"_ecp",
".",
"items",
"(",
")",
":",
"if",
"re",
".",
"match",
"(",
"key",
",",
"ipaddress",
")",
":",
"return",
"eid",
"return",
"None"
] | 29.5 | 14.928571 |
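The same first-match lookup works over any pattern-to-entity-ID mapping; the patterns and entity IDs below are invented.

```python
import re

ecp_map = {
    r"130\.239\..*": "https://idp.example.edu/saml",
    r"192\.168\.1\..*": "https://idp.local/saml",
}

def match_idp(ipaddress):
    # Return the entity ID of the first pattern matching the client IP.
    for pattern, entity_id in ecp_map.items():
        if re.match(pattern, ipaddress):
            return entity_id
    return None

print(match_idp("192.168.1.77"))  # -> https://idp.local/saml
```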
def close_monomers(self, group, cutoff=4.0):
"""Returns a list of Monomers from within a cut off distance of the Monomer
Parameters
----------
group: BaseAmpal or Subclass
            Group to be searched for Monomers that are close to this Monomer.
cutoff: float
Distance cut off.
Returns
-------
nearby_residues: [Monomers]
List of Monomers within cut off distance.
"""
nearby_residues = []
for self_atom in self.atoms.values():
nearby_atoms = group.is_within(cutoff, self_atom)
for res_atom in nearby_atoms:
if res_atom.parent not in nearby_residues:
nearby_residues.append(res_atom.parent)
return nearby_residues
|
[
"def",
"close_monomers",
"(",
"self",
",",
"group",
",",
"cutoff",
"=",
"4.0",
")",
":",
"nearby_residues",
"=",
"[",
"]",
"for",
"self_atom",
"in",
"self",
".",
"atoms",
".",
"values",
"(",
")",
":",
"nearby_atoms",
"=",
"group",
".",
"is_within",
"(",
"cutoff",
",",
"self_atom",
")",
"for",
"res_atom",
"in",
"nearby_atoms",
":",
"if",
"res_atom",
".",
"parent",
"not",
"in",
"nearby_residues",
":",
"nearby_residues",
".",
"append",
"(",
"res_atom",
".",
"parent",
")",
"return",
"nearby_residues"
] | 35.227273 | 15.954545 |
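For large groups the same proximity query is often answered with a KD-tree instead of per-atom `is_within` scans; a sketch with SciPy on made-up coordinates:

```python
import numpy as np
from scipy.spatial import cKDTree

atom_coords = np.random.rand(500, 3) * 50.0   # atoms of the searched group
query_coords = np.random.rand(10, 3) * 50.0   # atoms of this monomer

tree = cKDTree(atom_coords)
nearby = tree.query_ball_point(query_coords, r=4.0)  # indices within 4.0
unique_atoms = sorted({i for hits in nearby for i in hits})
print(len(unique_atoms))
```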