Dataset schema (per-record fields, with observed value lengths):

    nwo                 string, 5 to 106 chars
    sha                 string, 40 chars
    path                string, 4 to 174 chars
    language            string, 1 distinct value ("python")
    identifier          string, 1 to 140 chars
    parameters          string, 0 to 87.7k chars
    argument_list       string, 1 distinct value
    return_statement    string, 0 to 426k chars
    docstring           string, 0 to 64.3k chars
    docstring_summary   string, 0 to 26.3k chars
    docstring_tokens    list
    function            string, 18 to 4.83M chars
    function_tokens     list
    url                 string, 83 to 304 chars
Abjad/abjad
d0646dfbe83db3dc5ab268f76a0950712b87b7fd
abjad/pitch/segments.py
python
IntervalClassSegment.from_selection
(class_, selection, item_class=None)
return class_(items=intervals, item_class=item_class)
Initializes interval-class segment from component selection.
def from_selection(class_, selection, item_class=None):
    """
    Initializes interval-class segment from component selection.

    ..  container:: example

        >>> staff_1 = abjad.Staff("c'4 <d' fs' a'>4 b2")
        >>> staff_2 = abjad.Staff("c4. r8 g2")
        >>> selection = abjad.select((staff_1, staff_2))
        >>> abjad.IntervalClassSegment.from_selection(selection)
        IntervalClassSegment(['-M2', '-M3', '-m3', '+m7', '+M7', '-P5'])

    Returns interval-class segment.
    """
    pitch_segment = PitchSegment.from_selection(selection)
    pitches = [_ for _ in pitch_segment]
    intervals = math.difference_series(pitches)
    return class_(items=intervals, item_class=item_class)
https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/pitch/segments.py#L213-L230
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/yaml/__init__.py
python
YAMLObjectMetaclass.__init__
(cls, name, bases, kwds)
def __init__(cls, name, bases, kwds):
    super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
    if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
        cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
        cls.yaml_dumper.add_representer(cls, cls.to_yaml)
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/yaml/__init__.py#L280-L284
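The metaclass above is what makes PyYAML's yaml.YAMLObject subclasses self-registering: defining a yaml_tag on the class is enough to wire up both loading and dumping. A minimal sketch of that behavior (the Point class and its tag are illustrative, not part of this record):

import yaml

class Point(yaml.YAMLObject):
    # Assigning yaml_tag triggers YAMLObjectMetaclass.__init__, which
    # registers a constructor and a representer for this class.
    yaml_tag = '!Point'

    def __init__(self, x, y):
        self.x = x
        self.y = y

p = yaml.load('!Point {x: 1, y: 2}', Loader=yaml.Loader)
print(p.x, p.y)                # 1 2
print(yaml.dump(Point(3, 4)))  # a !Point-tagged mapping of x and y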
fbchat-dev/fbchat
916a14062d31f3624dfe8dd4ab672648a3e508c0
fbchat/_listen.py
python
get_cookie_header
(session: requests.Session, url: str)
return requests.cookies.get_cookie_header( session.cookies, requests.Request("GET", url), )
Extract a cookie header from a requests session.
def get_cookie_header(session: requests.Session, url: str) -> str:
    """Extract a cookie header from a requests session."""
    # The cookies are extracted this way to make sure they're escaped correctly
    return requests.cookies.get_cookie_header(
        session.cookies,
        requests.Request("GET", url),
    )
https://github.com/fbchat-dev/fbchat/blob/916a14062d31f3624dfe8dd4ab672648a3e508c0/fbchat/_listen.py#L51-L56
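A minimal sketch of calling the same requests.cookies.get_cookie_header API directly (the cookie name, value, and domain are made up):

import requests

session = requests.Session()
session.cookies.set("session_id", "abc123", domain="example.com")

header = requests.cookies.get_cookie_header(
    session.cookies, requests.Request("GET", "https://example.com/")
)
print(header)  # session_id=abc123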
entropy1337/infernal-twin
10995cd03312e39a48ade0f114ebb0ae3a711bb8
Modules/build/reportlab/src/reportlab/graphics/shapes.py
python
_repr
(self,I=None)
return a repr style string with named fixed args first, then keywords
def _repr(self,I=None):
    '''return a repr style string with named fixed args first, then keywords'''
    if isinstance(self,float):
        return fp_str(self)
    elif isSeq(self):
        s = ''
        for v in self:
            s = s + '%s,' % _repr(v,I)
        if isinstance(self,list):
            return '[%s]' % s[:-1]
        else:
            return '(%s%s)' % (s[:-1],len(self)==1 and ',' or '')
    elif self is EmptyClipPath:
        if I: _addObjImport(self,I,'EmptyClipPath')
        return 'EmptyClipPath'
    elif isinstance(self,Shape):
        if I: _addObjImport(self,I)
        from inspect import getargs
        args, varargs, varkw = getargs(self.__init__.__func__.__code__)
        P = self.getProperties()
        s = self.__class__.__name__+'('
        for n in args[1:]:
            v = P[n]
            del P[n]
            s = s + '%s,' % _repr(v,I)
        for n,v in P.items():
            v = P[n]
            s = s + '%s=%s,' % (n, _repr(v,I))
        return s[:-1]+')'
    else:
        return repr(self)
https://github.com/entropy1337/infernal-twin/blob/10995cd03312e39a48ade0f114ebb0ae3a711bb8/Modules/build/reportlab/src/reportlab/graphics/shapes.py#L567-L597
mlcommons/training
4a4d5a0b7efe99c680306b1940749211d4238a84
image_classification/tensorflow2/common.py
python
get_num_train_iterations
(flags_obj)
return steps_per_epoch, train_epochs
Returns the number of training steps, train and test epochs.
def get_num_train_iterations(flags_obj):
    """Returns the number of training steps, train and test epochs."""
    if flags_obj.drop_train_remainder:
        steps_per_epoch = (
            imagenet_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)
    else:
        steps_per_epoch = (
            math.ceil(1.0 * imagenet_preprocessing.NUM_IMAGES['train'] /
                      flags_obj.batch_size))
    train_epochs = flags_obj.train_epochs

    # If multiple epochs, ignore the train_steps flag.
    if train_epochs <= 1 and flags_obj.train_steps:
        steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch)
        train_epochs = 1
    else:
        eval_offset_epochs = flags_obj.eval_offset_epochs
        epochs_between_evals = flags_obj.epochs_between_evals
        train_epochs = eval_offset_epochs + math.ceil(
            (train_epochs - eval_offset_epochs) /
            epochs_between_evals) * epochs_between_evals

    return steps_per_epoch, train_epochs
https://github.com/mlcommons/training/blob/4a4d5a0b7efe99c680306b1940749211d4238a84/image_classification/tensorflow2/common.py#L635-L657
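The else-branch above is easy to misread: it rounds the requested epoch count up so that the epochs after eval_offset_epochs form a whole number of eval periods. A standalone sketch with made-up flag values (the constants are illustrative, not the MLPerf defaults):

import math

num_train_images = 1281167   # ImageNet train-set size
batch_size = 1024
train_epochs = 40
eval_offset_epochs = 2
epochs_between_evals = 4

# drop_train_remainder=True path: floor division.
steps_per_epoch = num_train_images // batch_size   # 1251

# Round (40 - 2) up to a multiple of 4: ceil(38 / 4) * 4 = 40, so 2 + 40 = 42.
rounded_epochs = eval_offset_epochs + math.ceil(
    (train_epochs - eval_offset_epochs) / epochs_between_evals
) * epochs_between_evals
print(steps_per_epoch, rounded_epochs)   # 1251 42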
evennia/evennia
fa79110ba6b219932f22297838e8ac72ebc0be0e
evennia/locks/lockhandler.py
python
LockHandler.clear
(self)
Remove all locks in the handler.
def clear(self):
    """
    Remove all locks in the handler.
    """
    self.locks = {}
    self.lock_storage = ""
    self._save_locks()
https://github.com/evennia/evennia/blob/fa79110ba6b219932f22297838e8ac72ebc0be0e/evennia/locks/lockhandler.py#L464-L471
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/werkzeug/debug/__init__.py
python
DebuggedApplication._get_pin
(self)
return self._pin
def _get_pin(self):
    if not hasattr(self, '_pin'):
        self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
    return self._pin
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/werkzeug/debug/__init__.py#L266-L269
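_get_pin computes the debugger PIN once and caches it as an instance attribute; the same lazy-initialization idiom works anywhere. A generic sketch (class and helper names are hypothetical):

class Config:
    def _get_token(self):
        # Compute on first access, then serve the cached attribute.
        if not hasattr(self, '_token'):
            self._token = self._make_token()   # hypothetical expensive call
        return self._token

    def _make_token(self):
        print('computing...')
        return 't0k3n'

cfg = Config()
cfg._get_token()   # prints 'computing...'
cfg._get_token()   # served from cache, no recomputation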
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/lib-tk/Tix.py
python
ScrolledWindow.__init__
(self, master, cnf={}, **kw)
def __init__(self, master, cnf={}, **kw):
    TixWidget.__init__(self, master, 'tixScrolledWindow',
                       ['options'], cnf, kw)
    self.subwidget_list['window'] = _dummyFrame(self, 'window')
    self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
    self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/lib-tk/Tix.py#L1350-L1354
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/api/v2010/account/new_key.py
python
NewKeyPage.__init__
(self, version, response, solution)
Initialize the NewKeyPage
def __init__(self, version, response, solution):
    """
    Initialize the NewKeyPage

    :param Version version: Version that contains the resource
    :param Response response: Response from the API
    :param account_sid: A 34 character string that uniquely identifies this resource.

    :returns: twilio.rest.api.v2010.account.new_key.NewKeyPage
    :rtype: twilio.rest.api.v2010.account.new_key.NewKeyPage
    """
    super(NewKeyPage, self).__init__(version, response)

    # Path Solution
    self._solution = solution
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/new_key.py#L61-L75
CalebBell/thermo
572a47d1b03d49fe609b8d5f826fa6a7cde00828
thermo/eos.py
python
PRTranslatedPPJP.__init__
(self, Tc, Pc, omega, c=0.0, T=None, P=None, V=None)
def __init__(self, Tc, Pc, omega, c=0.0, T=None, P=None, V=None):
    self.Tc = Tc
    self.Pc = Pc
    self.omega = omega
    self.T = T
    self.P = P
    self.V = V
    Pc_inv = 1.0/Pc
    self.a = self.c1*R2*Tc*Tc*Pc_inv
    self.c = c
    # 0.3919 + 1.4996*omega - 0.2721*omega**2 + 0.1063*omega**3
    self.kappa = omega*(omega*(0.1063*omega - 0.2721) + 1.4996) + 0.3919
    self.kwargs = {'c': c}
    b0 = self.c2*R*Tc*Pc_inv
    self.b = b = b0 - c
    self.delta = 2.0*(c + b0)
    self.epsilon = -b0*b0 + c*c + 2.0*c*b0
    self.solve()
https://github.com/CalebBell/thermo/blob/572a47d1b03d49fe609b8d5f826fa6a7cde00828/thermo/eos.py#L8217-L8236
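The kappa assignment above is the Horner form of the cubic preserved in the comment next to it; a quick check that the two forms agree (the omega value is arbitrary):

omega = 0.344   # arbitrary acentric factor

expanded = 0.3919 + 1.4996*omega - 0.2721*omega**2 + 0.1063*omega**3
horner = omega*(omega*(0.1063*omega - 0.2721) + 1.4996) + 0.3919
assert abs(expanded - horner) < 1e-12
print(horner)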
learningequality/ka-lite
571918ea668013dcf022286ea85eff1c5333fb8b
kalite/packages/bundled/django/contrib/localflavor/cl/forms.py
python
CLRutField._algorithm
(self, rut)
return '0123456789K0'[11 - suma % 11]
Takes RUT in pure canonical form, calculates the verifier digit.
def _algorithm(self, rut):
    """
    Takes RUT in pure canonical form, calculates the verifier digit.
    """
    suma = 0
    multi = 2
    for r in rut[::-1]:
        suma += int(r) * multi
        multi += 1
        if multi == 8:
            multi = 2
    return '0123456789K0'[11 - suma % 11]
https://github.com/learningequality/ka-lite/blob/571918ea668013dcf022286ea85eff1c5333fb8b/kalite/packages/bundled/django/contrib/localflavor/cl/forms.py#L60-L71
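The loop cycles multipliers 2 through 7 over the digits from right to left, then maps suma % 11 to a verifier character ('0' for remainder 0, 'K' for remainder 1). As a standalone function (the sample RUT is illustrative):

def rut_verifier(rut: str) -> str:
    # Standalone copy of CLRutField._algorithm for illustration.
    suma, multi = 0, 2
    for r in rut[::-1]:      # digits right to left
        suma += int(r) * multi
        multi += 1
        if multi == 8:       # multipliers cycle 2,3,4,5,6,7
            multi = 2
    # suma % 11 == 0 maps to '0'; suma % 11 == 1 maps to 'K'.
    return '0123456789K0'[11 - suma % 11]

print(rut_verifier('30686957'))   # '4', i.e. RUT 30.686.957-4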
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/strategies/rl.py
python
glom
(key, count, combine)
return conglomerate
Create a rule to conglomerate identical args
def glom(key, count, combine):
    """ Create a rule to conglomerate identical args

    >>> from sympy.strategies import glom
    >>> from sympy import Add
    >>> from sympy.abc import x

    >>> key     = lambda x: x.as_coeff_Mul()[1]
    >>> count   = lambda x: x.as_coeff_Mul()[0]
    >>> combine = lambda cnt, arg: cnt * arg
    >>> rl = glom(key, count, combine)

    >>> rl(Add(x, -x, 3*x, 2, 3, evaluate=False))
    3*x + 5

    Wait, how are key, count and combine supposed to work?

    >>> key(2*x)
    x
    >>> count(2*x)
    2
    >>> combine(2, x)
    2*x
    """
    def conglomerate(expr):
        """ Conglomerate together identical args x + x -> 2x """
        groups = sift(expr.args, key)
        counts = dict((k, sum(map(count, args))) for k, args in groups.items())
        newargs = [combine(cnt, mat) for mat, cnt in counts.items()]
        if set(newargs) != set(expr.args):
            return new(type(expr), *newargs)
        else:
            return expr

    return conglomerate
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/strategies/rl.py#L42-L75
EricssonResearch/calvin-base
bc4645c2061c30ca305a660e48dc86e3317f5b6f
calvin/runtime/south/async/__init__.py
python
get_framework
()
return _FW_PATH
Get the framework used on the runtime
def get_framework():
    """ Get the framework used on the runtime """
    return _FW_PATH
https://github.com/EricssonResearch/calvin-base/blob/bc4645c2061c30ca305a660e48dc86e3317f5b6f/calvin/runtime/south/async/__init__.py#L52-L56
robinjia/adversarial-squad
9cd77aa3b15b71c84052cb875dc8bb6590ae4e9a
src/py/eval_squad.py
python
normalize_answer
(s)
return white_space_fix(remove_articles(remove_punc(lower(s))))
Lower text and remove punctuation, articles and extra whitespace.
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
https://github.com/robinjia/adversarial-squad/blob/9cd77aa3b15b71c84052cb875dc8bb6590ae4e9a/src/py/eval_squad.py#L13-L28
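Each stage feeds the next: lowercase, strip punctuation, drop articles, collapse whitespace. Tracing the pipeline by hand on a made-up answer string:

import re
import string

s = "The Cat's  hat!"
s = s.lower()                                      # "the cat's  hat!"
s = ''.join(ch for ch in s
            if ch not in set(string.punctuation))  # "the cats  hat"
s = re.sub(r'\b(a|an|the)\b', ' ', s)              # " cats  hat"
s = ' '.join(s.split())                            # "cats hat"
print(s)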
tao12345666333/tornado-zh
e9e8519beb147d9e1290f6a4fa7d61123d1ecb1c
tornado/gen.py
python
WaitIterator.next
(self)
return self._running_future
Returns a `.Future` that will yield the next available result.
def next(self):
    """Returns a `.Future` that will yield the next available result.

    Note that this `.Future` will not be the same object as any of
    the inputs.
    """
    self._running_future = TracebackFuture()

    if self._finished:
        self._return_result(self._finished.popleft())

    return self._running_future
https://github.com/tao12345666333/tornado-zh/blob/e9e8519beb147d9e1290f6a4fa7d61123d1ecb1c/tornado/gen.py#L419-L430
CvvT/dumpDex
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
python/idaapi.py
python
Appcall__.byref
(val)
return PyIdc_cvt_refclass__(val)
Method to create references to immutable objects. Currently we support references to int/strings. Objects need not be passed by reference (this will be done automatically).
def byref(val):
    """
    Method to create references to immutable objects
    Currently we support references to int/strings
    Objects need not be passed by reference (this will be done automatically)
    """
    return PyIdc_cvt_refclass__(val)
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L3950-L3956
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/templates/historic/CRMT/menus.py
python
S3MainMenu.menu_modules
(cls)
Custom Modules Menu
def menu_modules(cls):
    """ Custom Modules Menu """

    return [
        # In title_area
        #MM("Sahana"),
        MM("Find", link=False)(
            MM("People", c="pr", f="person", m="summary", always_display=True),
            MM("Organizations", c="org", f="organisation", m="summary"),
            MM("Activities", c="project", f="activity", m="summary", always_display=True),
            MM("Points", c="gis", f="poi", m="summary",
               vars={"~.location_id$gis_feature_type": 1}, always_display=True),
            MM("Routes", c="gis", f="poi", m="summary",
               vars={"~.location_id$gis_feature_type": 2}, always_display=True),
            MM("Areas", c="gis", f="poi", m="summary",
               vars={"~.location_id$gis_feature_type": 3}, always_display=True),
        ),
        MM("Add", link=False)(
            MM("Person", c="pr", f="person", args="create", always_display=True),
            MM("Organization", c="org", f="organisation", args="create"),
            MM("Activity", c="project", f="activity", args="create", always_display=True),
            MM("Point", c="gis", f="poi", args="create",
               vars={"~.location_id$gis_feature_type": 1}, always_display=True),
            MM("Route", c="gis", f="poi", args="create",
               vars={"~.location_id$gis_feature_type": 2}, always_display=True),
            MM("Area", c="gis", f="poi", args="create",
               vars={"~.location_id$gis_feature_type": 3}, always_display=True),
        ),
        MM("Share", link=False)(
            MM("Maps", c="gis", f="config", args="datalist", always_display=True),
            MM("Stories", c="cms", f="post", args="datalist", always_display=True),
        ),
        MM("Map", c="gis", f="index", always_display=True),
    ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/templates/historic/CRMT/menus.py#L46-L88
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/splom/_hoverlabel.py
python
Hoverlabel.bordercolor
(self)
return self["bordercolor"]
Sets the border color of the hover labels for this trace.
def bordercolor(self):
    """
    Sets the border color of the hover labels for this trace.

    The 'bordercolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color:
            aliceblue, antiquewhite, aqua, aquamarine, azure,
            beige, bisque, black, blanchedalmond, blue,
            blueviolet, brown, burlywood, cadetblue,
            chartreuse, chocolate, coral, cornflowerblue,
            cornsilk, crimson, cyan, darkblue, darkcyan,
            darkgoldenrod, darkgray, darkgrey, darkgreen,
            darkkhaki, darkmagenta, darkolivegreen, darkorange,
            darkorchid, darkred, darksalmon, darkseagreen,
            darkslateblue, darkslategray, darkslategrey,
            darkturquoise, darkviolet, deeppink, deepskyblue,
            dimgray, dimgrey, dodgerblue, firebrick,
            floralwhite, forestgreen, fuchsia, gainsboro,
            ghostwhite, gold, goldenrod, gray, grey, green,
            greenyellow, honeydew, hotpink, indianred, indigo,
            ivory, khaki, lavender, lavenderblush, lawngreen,
            lemonchiffon, lightblue, lightcoral, lightcyan,
            lightgoldenrodyellow, lightgray, lightgrey,
            lightgreen, lightpink, lightsalmon, lightseagreen,
            lightskyblue, lightslategray, lightslategrey,
            lightsteelblue, lightyellow, lime, limegreen,
            linen, magenta, maroon, mediumaquamarine,
            mediumblue, mediumorchid, mediumpurple,
            mediumseagreen, mediumslateblue, mediumspringgreen,
            mediumturquoise, mediumvioletred, midnightblue,
            mintcream, mistyrose, moccasin, navajowhite, navy,
            oldlace, olive, olivedrab, orange, orangered,
            orchid, palegoldenrod, palegreen, paleturquoise,
            palevioletred, papayawhip, peachpuff, peru, pink,
            plum, powderblue, purple, red, rosybrown,
            royalblue, rebeccapurple, saddlebrown, salmon,
            sandybrown, seagreen, seashell, sienna, silver,
            skyblue, slateblue, slategray, slategrey, snow,
            springgreen, steelblue, tan, teal, thistle, tomato,
            turquoise, violet, wheat, white, whitesmoke,
            yellow, yellowgreen
      - A list or array of any of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["bordercolor"]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/splom/_hoverlabel.py#L150-L201
gem/oq-engine
1bdb88f3914e390abcbd285600bfd39477aae47c
openquake/hazardlib/calc/disagg.py
python
assert_same_shape
(arrays)
Raises an AssertionError if the shapes are not consistent
def assert_same_shape(arrays):
    """
    Raises an AssertionError if the shapes are not consistent
    """
    shape = arrays[0].shape
    for arr in arrays[1:]:
        assert arr.shape == shape, (arr.shape, shape)
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/hazardlib/calc/disagg.py#L45-L51
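Restating the helper so it runs standalone, with one passing and one failing call:

import numpy as np

def assert_same_shape(arrays):
    shape = arrays[0].shape
    for arr in arrays[1:]:
        assert arr.shape == shape, (arr.shape, shape)

assert_same_shape([np.zeros((2, 3)), np.ones((2, 3))])   # passes silently
try:
    assert_same_shape([np.zeros((2, 3)), np.ones((3, 2))])
except AssertionError as exc:
    print(exc)   # ((3, 2), (2, 3))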
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage_setup/autogen/interpreters/specs/cc.py
python
CCInterpreter.__init__
(self)
Initialize a CCInterpreter.
def __init__(self):
    r"""
    Initialize a CCInterpreter.

    EXAMPLES::

        sage: from sage_setup.autogen.interpreters import *
        sage: interp = CCInterpreter()
        sage: interp.name
        'cc'
        sage: interp.mc_py_constants
        {MC:py_constants}
        sage: interp.chunks
        [{MC:args}, {MC:retval}, {MC:constants}, {MC:py_constants}, {MC:stack}, {MC:code}, {MC:domain}]
        sage: interp.pg('A[D]', 'S')
        ([({MC:args}, {MC:code}, None)], [({MC:stack}, None, None)])
        sage: instrs = dict([(ins.name, ins) for ins in interp.instr_descs])
        sage: instrs['add']
        add: SS->S = 'mpc_add(o0, i0, i1, MPC_RNDNN);'
        sage: instrs['py_call']
        py_call: *->S = '\n if (!cc_py_call...goto error;\n}\n'

    That py_call instruction is particularly interesting, and
    demonstrates a useful technique to let you use Cython code
    in an interpreter.  Let's look more closely::

        sage: print(instrs['py_call'].code)
        <BLANKLINE>
        if (!cc_py_call_helper(domain, i0, n_i1, i1, o0)) {
          goto error;
        }
        <BLANKLINE>

    This instruction makes use of the function cc_py_call_helper,
    which is declared::

        sage: print(interp.c_header)
        <BLANKLINE>
        #include <mpc.h>
        #include "sage/ext/interpreters/wrapper_cc.h"
        <BLANKLINE>

    So instructions where you need to interact with Python can
    call back into Cython code fairly easily.
    """
    mc_retval = MemoryChunkCCRetval('retval', ty_mpc)
    super(CCInterpreter, self).__init__(ty_mpc, mc_retval=mc_retval)
    self.err_return = '0'
    self.mc_py_constants = MemoryChunkConstants('py_constants', ty_python)
    self.mc_domain = MemoryChunkPyConstant('domain')
    self.chunks = [self.mc_args, self.mc_retval, self.mc_constants,
                   self.mc_py_constants,
                   self.mc_stack, self.mc_code, self.mc_domain]
    pg = params_gen(A=self.mc_args, C=self.mc_constants, D=self.mc_code,
                    S=self.mc_stack, P=self.mc_py_constants)
    self.pg = pg
    self.c_header = ri(0, '''
        #include <mpc.h>
        #include "sage/ext/interpreters/wrapper_cc.h"
        ''')
    self.pxd_header = ri(0, """
        from sage.rings.real_mpfr cimport RealNumber
        from sage.libs.mpfr cimport *
        from sage.rings.complex_mpfr cimport ComplexNumber
        from sage.libs.mpc cimport *
        """)
    self.pyx_header = ri(0, """\
        # distutils: libraries = mpfr mpc gmp

        cdef public bint cc_py_call_helper(object domain, object fn,
                                           int n_args,
                                           mpc_t* args, mpc_t retval) except 0:
            py_args = []
            cdef int i
            cdef ComplexNumber ZERO=domain.zero()
            cdef ComplexNumber cn
            for i from 0 <= i < n_args:
                cn = ZERO._new()
                mpfr_set(cn.__re, mpc_realref(args[i]), MPFR_RNDN)
                mpfr_set(cn.__im, mpc_imagref(args[i]), MPFR_RNDN)
                py_args.append(cn)
            cdef ComplexNumber result = domain(fn(*py_args))
            mpc_set_fr_fr(retval, result.__re,result.__im, MPC_RNDNN)
            return 1
        """)

    instrs = [
        InstrSpec('load_arg', pg('A[D]', 'S'),
                  code='mpc_set(o0, i0, MPC_RNDNN);'),
        InstrSpec('load_const', pg('C[D]', 'S'),
                  code='mpc_set(o0, i0, MPC_RNDNN);'),
        InstrSpec('return', pg('S', ''),
                  code='mpc_set(retval, i0, MPC_RNDNN);\nreturn 1;\n'),
        InstrSpec('py_call', pg('P[D]S@D', 'S'),
                  uses_error_handler=True,
                  code="""
if (!cc_py_call_helper(domain, i0, n_i1, i1, o0)) {
  goto error;
}
""")
        ]
    for (name, op) in [('add', 'mpc_add'), ('sub', 'mpc_sub'),
                       ('mul', 'mpc_mul'), ('div', 'mpc_div'),
                       ('pow', 'mpc_pow')]:
        instrs.append(instr_funcall_2args_mpc(name, pg('SS', 'S'), op))
    instrs.append(instr_funcall_2args_mpc('ipow', pg('SD', 'S'), 'mpc_pow_si'))
    for name in ['neg', 'log', 'log10', 'exp', 'cos', 'sin', 'tan',
                 'acos', 'asin', 'atan', 'cosh', 'sinh', 'tanh',
                 'acosh', 'asinh', 'atanh']:
        instrs.append(instr_funcall_1arg_mpc(name, pg('S', 'S'), 'mpc_' + name))
    # mpc_ui_div constructs a temporary mpc_t and then calls mpc_div;
    # it would probably be (slightly) faster to use a permanent copy
    # of "one" (on the other hand, the constructed temporary copy is
    # on the stack, so it's very likely to be in the cache).
    instrs.append(InstrSpec('invert', pg('S', 'S'),
                            code='mpc_ui_div(o0, 1, i0, MPC_RNDNN);'))
    self.instr_descs = instrs
    self._set_opcodes()
    # Supported for exponents that fit in a long, so we could use
    # a much wider range on a 64-bit machine.  On the other hand,
    # it's easier to write the code this way, and constant integer
    # exponents outside this range probably aren't very common anyway.
    self.ipow_range = (int(-2**31), int(2**31-1))
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage_setup/autogen/interpreters/specs/cc.py#L107-L240
AndrewAnnex/SpiceyPy
9f8b626338f119bacd39ef2ba94a6f71bd6341c0
src/spiceypy/spiceypy.py
python
unorm
(v1: ndarray)
return stypes.c_vector_to_python(vout), vmag.value
Normalize a double precision 3-vector and return its magnitude.
def unorm(v1: ndarray) -> Tuple[ndarray, float]:
    """
    Normalize a double precision 3-vector and return its magnitude.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/unorm_c.html

    :param v1: Vector to be normalized.
    :return: Unit vector of v1, Magnitude of v1.
    """
    v1 = stypes.to_double_vector(v1)
    vout = stypes.empty_double_vector(3)
    vmag = ctypes.c_double()
    libspice.unorm_c(v1, vout, ctypes.byref(vmag))
    return stypes.c_vector_to_python(vout), vmag.value
https://github.com/AndrewAnnex/SpiceyPy/blob/9f8b626338f119bacd39ef2ba94a6f71bd6341c0/src/spiceypy/spiceypy.py#L14556-L14569
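A 3-4-5 vector makes the output easy to verify by eye (no SPICE kernels are needed for vector routines):

import spiceypy

unit, mag = spiceypy.unorm([3.0, 4.0, 0.0])
print(unit)   # unit vector (0.6, 0.8, 0.0)
print(mag)    # 5.0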
libtcod/python-tcod
e12c4172baa9efdfd74aff6ee9bab8454a835248
tcod/console.py
python
Console.default_bg
(self)
return color.r, color.g, color.b
Tuple[int, int, int]: The default background color.
def default_bg(self) -> Tuple[int, int, int]:
    """Tuple[int, int, int]: The default background color."""
    color = self._console_data.back
    return color.r, color.g, color.b
https://github.com/libtcod/python-tcod/blob/e12c4172baa9efdfd74aff6ee9bab8454a835248/tcod/console.py#L345-L348
eventlet/eventlet
955be1c7227a6df0daa537ebb8aed0cfa174d2e5
eventlet/greenio/base.py
python
socket_accept
(descriptor)
Attempts to accept() on the descriptor, returns a client,address tuple if it succeeds; returns None if it needs to trampoline, and raises any exceptions.
def socket_accept(descriptor):
    """
    Attempts to accept() on the descriptor, returns a client,address tuple
    if it succeeds; returns None if it needs to trampoline, and raises any
    exceptions.
    """
    try:
        return descriptor.accept()
    except socket.error as e:
        if get_errno(e) == errno.EWOULDBLOCK:
            return None
        raise
https://github.com/eventlet/eventlet/blob/955be1c7227a6df0daa537ebb8aed0cfa174d2e5/eventlet/greenio/base.py#L57-L68
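The None return is the caller's cue to wait until the listener is readable and retry; inside eventlet that wait is a green-thread trampoline, but the contract can be sketched with plain select (illustrative, not eventlet's actual accept loop):

import errno
import select
import socket

def accept_blocking(listener):
    # listener is assumed to be a non-blocking socket.
    while True:
        try:
            return listener.accept()
        except socket.error as e:
            if e.errno != errno.EWOULDBLOCK:
                raise
        select.select([listener], [], [])   # block until readable, then retry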
llSourcell/AI_Artist
3038c06c2e389b9c919c881c9a169efe2fd7810e
lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
python
ExFileObject.seekable
(self)
return self.fileobj.seekable()
def seekable(self):
    return self.fileobj.seekable()
https://github.com/llSourcell/AI_Artist/blob/3038c06c2e389b9c919c881c9a169efe2fd7810e/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L807-L808
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/django/db/backends/__init__.py
python
BaseDatabaseIntrospection.get_indexes
(self, cursor, table_name)
Returns a dictionary of indexed fieldname -> infodict for the given table, where each infodict is in the format: {'primary_key': boolean representing whether it's the primary key, 'unique': boolean representing whether it's a unique index}
def get_indexes(self, cursor, table_name):
    """
    Returns a dictionary of indexed fieldname -> infodict for the given
    table, where each infodict is in the format:
        {'primary_key': boolean representing whether it's the primary key,
         'unique': boolean representing whether it's a unique index}

    Only single-column indexes are introspected.
    """
    raise NotImplementedError
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/django/db/backends/__init__.py#L1344-L1353
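Each backend supplies its own implementation; the contract is only the shape of the mapping. A hypothetical return value for a table with a primary-key id and a unique email column:

indexes = {
    'id':    {'primary_key': True,  'unique': True},
    'email': {'primary_key': False, 'unique': True},
}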
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_adm_csr.py
python
OpenShiftCLIConfig.config_options
(self)
return self._options
return config options
def config_options(self):
    ''' return config options '''
    return self._options
[ "def", "config_options", "(", "self", ")", ":", "return", "self", ".", "_options" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_adm_csr.py#L1439-L1441
dipu-bd/lightnovel-crawler
eca7a71f217ce7a6b0a54d2e2afb349571871880
sources/en/w/wnmtl.py
python
WNMTLCrawler.download_chapter_body
(self, chapter)
return '\n'.join(['<p>' + x + '</p>' for x in contents])
Download body of a single chapter and return as clean html format.
Download body of a single chapter and return as clean html format.
[ "Download", "body", "of", "a", "single", "chapter", "and", "return", "as", "clean", "html", "format", "." ]
def download_chapter_body(self, chapter): '''Download body of a single chapter and return as clean html format.''' data = self.get_json(chapter['url']) contents = data['data']['content'].split('\n') return '\n'.join(['<p>' + x + '</p>' for x in contents])
[ "def", "download_chapter_body", "(", "self", ",", "chapter", ")", ":", "data", "=", "self", ".", "get_json", "(", "chapter", "[", "'url'", "]", ")", "contents", "=", "data", "[", "'data'", "]", "[", "'content'", "]", ".", "split", "(", "'\\n'", ")", "return", "'\\n'", ".", "join", "(", "[", "'<p>'", "+", "x", "+", "'</p>'", "for", "x", "in", "contents", "]", ")" ]
https://github.com/dipu-bd/lightnovel-crawler/blob/eca7a71f217ce7a6b0a54d2e2afb349571871880/sources/en/w/wnmtl.py#L88-L92
rpmuller/pyquante2
6e34cb4480ae7dbd8c5e44d221d8b27584890c83
pyquante2/utils.py
python
cholorth
(S)
return np.linalg.inv(np.linalg.cholesky(S)).T
Cholesky orthogonalization
Cholesky orthogonalization
[ "Cholesky", "orthogonalization" ]
def cholorth(S): "Cholesky orthogonalization" return np.linalg.inv(np.linalg.cholesky(S)).T
[ "def", "cholorth", "(", "S", ")", ":", "return", "np", ".", "linalg", ".", "inv", "(", "np", ".", "linalg", ".", "cholesky", "(", "S", ")", ")", ".", "T" ]
https://github.com/rpmuller/pyquante2/blob/6e34cb4480ae7dbd8c5e44d221d8b27584890c83/pyquante2/utils.py#L163-L165
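A quick numerical check, separate from the record, of the property this one-liner provides: the returned X satisfies X.T @ S @ X == I for a symmetric positive definite overlap matrix S (the random S below is an illustrative assumption):

import numpy as np

def cholorth(S):
    "Cholesky orthogonalization"
    return np.linalg.inv(np.linalg.cholesky(S)).T

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
S = A @ A.T + 4 * np.eye(4)                  # symmetric positive definite
X = cholorth(S)
print(np.allclose(X.T @ S @ X, np.eye(4)))   # True

Since S = L @ L.T from the Cholesky factor, X = inv(L).T gives X.T @ S @ X = inv(L) @ L @ L.T @ inv(L).T = I.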
Toblerity/rtree
eb04ef8933418ab108a45b6576abea95d6cbcbdb
rtree/index.py
python
CustomStorage.destroy
(self, returnError)
Must be overridden. No return value.
Must be overridden. No return value.
[ "Must", "be", "overridden", ".", "No", "return", "value", "." ]
def destroy(self, returnError): """Must be overridden. No return value.""" returnError.contents.value = self.IllegalStateError raise NotImplementedError("You must override this method.")
[ "def", "destroy", "(", "self", ",", "returnError", ")", ":", "returnError", ".", "contents", ".", "value", "=", "self", ".", "IllegalStateError", "raise", "NotImplementedError", "(", "\"You must override this method.\"", ")" ]
https://github.com/Toblerity/rtree/blob/eb04ef8933418ab108a45b6576abea95d6cbcbdb/rtree/index.py#L1771-L1774
mortcanty/CRCPython
35d0e9f96befd38d4a78671c868128440c74b0e6
src/build/lib/auxil/png.py
python
Test.testPAMin
(self)
Test that the command line tool can read PAM file.
Test that the command line tool can read PAM file.
[ "Test", "that", "the", "command", "line", "tool", "can", "read", "PAM", "file", "." ]
def testPAMin(self): """Test that the command line tool can read PAM file.""" def do(): return _main(['testPAMin']) s = StringIO() s.write('P7\nWIDTH 3\nHEIGHT 1\nDEPTH 4\nMAXVAL 255\n' 'TUPLTYPE RGB_ALPHA\nENDHDR\n') # The pixels in flat row flat pixel format flat = [255,0,0,255, 0,255,0,120, 0,0,255,30] s.write(''.join(map(chr, flat))) s.flush() s.seek(0) o = StringIO() testWithIO(s, o, do) r = Reader(bytes=o.getvalue()) x,y,pixels,meta = r.read() self.assert_(r.alpha) self.assert_(not r.greyscale) self.assertEqual(list(itertools.chain(*pixels)), flat)
[ "def", "testPAMin", "(", "self", ")", ":", "def", "do", "(", ")", ":", "return", "_main", "(", "[", "'testPAMin'", "]", ")", "s", "=", "StringIO", "(", ")", "s", ".", "write", "(", "'P7\\nWIDTH 3\\nHEIGHT 1\\nDEPTH 4\\nMAXVAL 255\\n'", "'TUPLTYPE RGB_ALPHA\\nENDHDR\\n'", ")", "# The pixels in flat row flat pixel format", "flat", "=", "[", "255", ",", "0", ",", "0", ",", "255", ",", "0", ",", "255", ",", "0", ",", "120", ",", "0", ",", "0", ",", "255", ",", "30", "]", "s", ".", "write", "(", "''", ".", "join", "(", "map", "(", "chr", ",", "flat", ")", ")", ")", "s", ".", "flush", "(", ")", "s", ".", "seek", "(", "0", ")", "o", "=", "StringIO", "(", ")", "testWithIO", "(", "s", ",", "o", ",", "do", ")", "r", "=", "Reader", "(", "bytes", "=", "o", ".", "getvalue", "(", ")", ")", "x", ",", "y", ",", "pixels", ",", "meta", "=", "r", ".", "read", "(", ")", "self", ".", "assert_", "(", "r", ".", "alpha", ")", "self", ".", "assert_", "(", "not", "r", ".", "greyscale", ")", "self", ".", "assertEqual", "(", "list", "(", "itertools", ".", "chain", "(", "*", "pixels", ")", ")", ",", "flat", ")" ]
https://github.com/mortcanty/CRCPython/blob/35d0e9f96befd38d4a78671c868128440c74b0e6/src/build/lib/auxil/png.py#L2254-L2272
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/django/core/cache/backends/filebased.py
python
FileBasedCache._delete
(self, fname)
[]
def _delete(self, fname): if not fname.startswith(self._dir) or not os.path.exists(fname): return try: os.remove(fname) except OSError as e: # ENOENT can happen if the cache file is removed (by another # process) after the os.path.exists check. if e.errno != errno.ENOENT: raise
[ "def", "_delete", "(", "self", ",", "fname", ")", ":", "if", "not", "fname", ".", "startswith", "(", "self", ".", "_dir", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "return", "try", ":", "os", ".", "remove", "(", "fname", ")", "except", "OSError", "as", "e", ":", "# ENOENT can happen if the cache file is removed (by another", "# process) after the os.path.exists check.", "if", "e", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "raise" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/core/cache/backends/filebased.py#L67-L76
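The ENOENT check in _delete is a general race-tolerant deletion idiom; a standalone sketch of the same pattern (the helper name is hypothetical, not Django's):

import errno
import os
import tempfile

def remove_if_exists(path):
    """Delete path, tolerating concurrent removal by another process."""
    try:
        os.remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:  # only swallow "no such file or directory"
            raise

fd, path = tempfile.mkstemp()
os.close(fd)
remove_if_exists(path)  # removes the file
remove_if_exists(path)  # already gone: ENOENT is swallowed, no exception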
NifTK/NiftyNet
935bf4334cd00fa9f9d50f6a95ddcbfdde4031e0
niftynet/evaluation/region_properties.py
python
RegionProperties.compactness
(self)
return np.power(Sn, 1.5) / Vn, np.power(Snb, 1.5) / Vnb, \ np.power(Sv, 1.5) / Vv, np.power(Svb, 1.5) / Vvb
Calculates the compactness S^1.5/V in terms of probabilistic count, binarised count, probabilistic volume, binarised volume :return:
Calculates the compactness S^1.5/V in terms of probabilistic count, binarised count, probabilistic volume, binarised volume
[ "Calculates", "the", "compactness", "S^1", ".", "5", "/", "V", "in", "terms", "of", "probabilistic", "count", "binarised", "count", "probabilistic", "volume", "binarised", "volume" ]
def compactness(self): """ Calculates the compactness S^1.5/V in terms of probabilistic count, binarised count, probabilistic volume, binarised volume :return: """ Sn, Snb, Sv, Svb = self.surface() Vn, Vnb, Vv, Vvb = self.volume() return np.power(Sn, 1.5) / Vn, np.power(Snb, 1.5) / Vnb, \ np.power(Sv, 1.5) / Vv, np.power(Svb, 1.5) / Vvb
[ "def", "compactness", "(", "self", ")", ":", "Sn", ",", "Snb", ",", "Sv", ",", "Svb", "=", "self", ".", "surface", "(", ")", "Vn", ",", "Vnb", ",", "Vv", ",", "Vvb", "=", "self", ".", "volume", "(", ")", "return", "np", ".", "power", "(", "Sn", ",", "1.5", ")", "/", "Vn", ",", "np", ".", "power", "(", "Snb", ",", "1.5", ")", "/", "Vnb", ",", "np", ".", "power", "(", "Sv", ",", "1.5", ")", "/", "Vv", ",", "np", ".", "power", "(", "Svb", ",", "1.5", ")", "/", "Vvb" ]
https://github.com/NifTK/NiftyNet/blob/935bf4334cd00fa9f9d50f6a95ddcbfdde4031e0/niftynet/evaluation/region_properties.py#L614-L624
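A worked check, not part of the record, of why S**1.5/V measures shape rather than size: it is scale-invariant, and for a sphere both evaluations below give the constant 6*sqrt(pi):

import math

def sphere_compactness(r):
    S = 4 * math.pi * r ** 2            # surface area
    V = (4.0 / 3.0) * math.pi * r ** 3  # volume
    return S ** 1.5 / V                 # the r**3 factors cancel

print(sphere_compactness(1.0))          # 10.6347... == 6 * sqrt(pi)
print(sphere_compactness(7.5))          # same value, independent of radius
print(6 * math.sqrt(math.pi))           # 10.6347...

Less compact shapes score higher, which is why the metric is reported per count/volume variant above.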
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/urlparse.py
python
urldefrag
(url)
Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string.
Removes any existing fragment from URL.
[ "Removes", "any", "existing", "fragment", "from", "URL", "." ]
def urldefrag(url): """Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string. """ if '#' in url: s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) return defrag, frag else: return url, ''
[ "def", "urldefrag", "(", "url", ")", ":", "if", "'#'", "in", "url", ":", "s", ",", "n", ",", "p", ",", "a", ",", "q", ",", "frag", "=", "urlparse", "(", "url", ")", "defrag", "=", "urlunparse", "(", "(", "s", ",", "n", ",", "p", ",", "a", ",", "q", ",", "''", ")", ")", "return", "defrag", ",", "frag", "else", ":", "return", "url", ",", "''" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/urlparse.py#L303-L315
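Usage is a one-liner; the import fallback below covers both the Python 2 location recorded here and the Python 3 one:

try:
    from urllib.parse import urldefrag   # Python 3
except ImportError:
    from urlparse import urldefrag       # Python 2, as in the record above

url, frag = urldefrag('http://example.com/doc.html#section-2')
print(url)    # http://example.com/doc.html
print(frag)   # section-2
print(urldefrag('http://example.com/doc.html')[1] == '')  # True: no fragment, empty string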
entropy1337/infernal-twin
10995cd03312e39a48ade0f114ebb0ae3a711bb8
Modules/build/pip/pip/_vendor/requests/models.py
python
RequestEncodingMixin.path_url
(self)
return ''.join(url)
Build the path URL to use.
Build the path URL to use.
[ "Build", "the", "path", "URL", "to", "use", "." ]
def path_url(self): """Build the path URL to use.""" url = [] p = urlsplit(self.url) path = p.path if not path: path = '/' url.append(path) query = p.query if query: url.append('?') url.append(query) return ''.join(url)
[ "def", "path_url", "(", "self", ")", ":", "url", "=", "[", "]", "p", "=", "urlsplit", "(", "self", ".", "url", ")", "path", "=", "p", ".", "path", "if", "not", "path", ":", "path", "=", "'/'", "url", ".", "append", "(", "path", ")", "query", "=", "p", ".", "query", "if", "query", ":", "url", ".", "append", "(", "'?'", ")", "url", ".", "append", "(", "query", ")", "return", "''", ".", "join", "(", "url", ")" ]
https://github.com/entropy1337/infernal-twin/blob/10995cd03312e39a48ade0f114ebb0ae3a711bb8/Modules/build/pip/pip/_vendor/requests/models.py#L54-L72
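The same logic restated as a free function (an illustrative sketch, not the requests API) makes the two cases visible: an empty path becomes '/', and the query string is re-appended:

try:
    from urllib.parse import urlsplit   # Python 3
except ImportError:
    from urlparse import urlsplit       # Python 2

def path_url(url):
    p = urlsplit(url)
    path = p.path or '/'                # bare host -> '/'
    return path + ('?' + p.query if p.query else '')

print(path_url('http://example.com'))                # /
print(path_url('http://example.com/search?q=term'))  # /search?q=term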
mattupstate/flask-security
674b18103fa8734aca71bbd084ea01e3709817ef
flask_security/core.py
python
UserMixin.is_active
(self)
return self.active
Returns `True` if the user is active.
Returns `True` if the user is active.
[ "Returns", "True", "if", "the", "user", "is", "active", "." ]
def is_active(self): """Returns `True` if the user is active.""" return self.active
[ "def", "is_active", "(", "self", ")", ":", "return", "self", ".", "active" ]
https://github.com/mattupstate/flask-security/blob/674b18103fa8734aca71bbd084ea01e3709817ef/flask_security/core.py#L390-L392
Tesorio/django-anon
5288d84001e2cbe4027c3c0f00173e6fe49da959
anon/compat.py
python
bulk_update
(objects, fields, manager, **bulk_update_kwargs)
Updates the list of objects using django queryset's inbuilt ``.bulk_update()`` method if present, else django_bulk_update's ``bulk_update()`` will be used :param objects: list of objects that need to be bulk updated :type objects: list[object] :param fields: list of field names to be updated :type fields: list[str] :param manager: instance of django model manager :type manager: models.Manager :param bulk_update_kwargs: keyword arguments passed to the ``bulk_update()`` :return: None :rtype: None
Updates the list of objects using django queryset's inbuilt ``.bulk_update()`` method if present, else django_bulk_update's ``bulk_update()`` will be used
[ "Updates", "the", "list", "of", "objects", "using", "django", "queryset", "s", "inbuilt", ".", "bulk_update", "()", "method", "if", "present", "else", "django_bulk_update", "s", "bulk_update", "()", "will", "be", "used" ]
def bulk_update(objects, fields, manager, **bulk_update_kwargs): """Updates the list of objects using django queryset's inbuilt ``.bulk_update()`` method if present, else django_bulk_update's ``bulk_update()`` will be used :param objects: list of objects that need to be bulk updated :type objects: list[object] :param fields: list of field names to be updated :type fields: list[str] :param manager: instance of django model manager :type manager: models.Manager :param bulk_update_kwargs: keyword arguments passed to the ``bulk_update()`` :return: None :rtype: None """ try: manager.bulk_update(objects, fields, **bulk_update_kwargs) except AttributeError: ext_bulk_update(objects, update_fields=fields, **bulk_update_kwargs)
[ "def", "bulk_update", "(", "objects", ",", "fields", ",", "manager", ",", "*", "*", "bulk_update_kwargs", ")", ":", "try", ":", "manager", ".", "bulk_update", "(", "objects", ",", "fields", ",", "*", "*", "bulk_update_kwargs", ")", "except", "AttributeError", ":", "ext_bulk_update", "(", "objects", ",", "update_fields", "=", "fields", ",", "*", "*", "bulk_update_kwargs", ")" ]
https://github.com/Tesorio/django-anon/blob/5288d84001e2cbe4027c3c0f00173e6fe49da959/anon/compat.py#L5-L21
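The record is a feature-detection shim; the try/except AttributeError pattern in isolation looks like this (the classes are hypothetical stand-ins, not Django managers):

class NewManager(object):
    def bulk_update(self, objects, fields):
        print('native bulk_update: %s %s' % (objects, fields))

class OldManager(object):
    pass  # no bulk_update attribute, as on older Django

def ext_bulk_update(objects, update_fields):
    print('external fallback: %s %s' % (objects, update_fields))

def bulk_update(objects, fields, manager):
    try:
        manager.bulk_update(objects, fields)
    except AttributeError:
        ext_bulk_update(objects, update_fields=fields)

bulk_update(['a'], ['name'], NewManager())  # native path
bulk_update(['a'], ['name'], OldManager())  # fallback path

One caveat of this shape: an AttributeError raised inside a real bulk_update would also be masked into the fallback; a hasattr(manager, 'bulk_update') check avoids that.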
StanfordVL/taskonomy
9f814867b5fe4165860862211e8e99b0f200144d
code/lib/optimizers/train_steps.py
python
get_default_train_step_kwargs
( global_step, max_steps, log_every_n_steps=1, trace_every_n_steps=None )
Sets some default arguments for any train_step_fn
Sets some default arguments for any train_step_fn
[ "Sets", "some", "default", "arguments", "for", "any", "train_step_fn" ]
def get_default_train_step_kwargs( global_step, max_steps, log_every_n_steps=1, trace_every_n_steps=None ): ''' Sets some default arguments for any train_step_fn ''' with tf.name_scope('train_step'): train_step_kwargs = { 'max_steps': max_steps } if max_steps: should_stop_op = tf.greater_equal(global_step, max_steps) else: should_stop_op = tf.constant(False) train_step_kwargs['should_stop'] = should_stop_op train_step_kwargs['should_log'] = lambda x: ( x % log_every_n_steps == 0 ) if trace_every_n_steps is not None: train_step_kwargs['should_trace'] = tf.equal( tf.mod(global_step, trace_every_n_steps), 0) train_step_kwargs['logdir'] = logdir train_step_kwargs[ 'global_step_copy' ] = tf.identity( global_step, name='global_step_copy' ) train_step_kwargs[ 'increment_global_step_op' ] = tf.assign( global_step, global_step+1 ) return train_step_kwargs
[ "def", "get_default_train_step_kwargs", "(", "global_step", ",", "max_steps", ",", "log_every_n_steps", "=", "1", ",", "trace_every_n_steps", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "'train_step'", ")", ":", "train_step_kwargs", "=", "{", "'max_steps'", ":", "max_steps", "}", "if", "max_steps", ":", "should_stop_op", "=", "tf", ".", "greater_equal", "(", "global_step", ",", "max_steps", ")", "else", ":", "should_stop_op", "=", "tf", ".", "constant", "(", "False", ")", "train_step_kwargs", "[", "'should_stop'", "]", "=", "should_stop_op", "train_step_kwargs", "[", "'should_log'", "]", "=", "lambda", "x", ":", "(", "x", "%", "log_every_n_steps", "==", "0", ")", "if", "trace_every_n_steps", "is", "not", "None", ":", "train_step_kwargs", "[", "'should_trace'", "]", "=", "tf", ".", "equal", "(", "tf", ".", "mod", "(", "global_step", ",", "trace_every_n_steps", ")", ",", "0", ")", "train_step_kwargs", "[", "'logdir'", "]", "=", "logdir", "train_step_kwargs", "[", "'global_step_copy'", "]", "=", "tf", ".", "identity", "(", "global_step", ",", "name", "=", "'global_step_copy'", ")", "train_step_kwargs", "[", "'increment_global_step_op'", "]", "=", "tf", ".", "assign", "(", "global_step", ",", "global_step", "+", "1", ")", "return", "train_step_kwargs" ]
https://github.com/StanfordVL/taskonomy/blob/9f814867b5fe4165860862211e8e99b0f200144d/code/lib/optimizers/train_steps.py#L109-L128
titusjan/argos
5a9c31a8a9a2ca825bbf821aa1e685740e3682d7
argos/inspector/pgplugins/old_imageplot2d.py
python
PgImagePlot2dCti._closeResources
(self)
Disconnects signals. Is called by self.finalize when the cti is deleted.
Disconnects signals. Is called by self.finalize when the cti is deleted.
[ "Disconnects", "signals", ".", "Is", "called", "by", "self", ".", "finalize", "when", "the", "cti", "is", "deleted", "." ]
def _closeResources(self): """ Disconnects signals. Is called by self.finalize when the cti is deleted. """ verCrossViewBox = self.pgImagePlot2d.verCrossPlotItem.getViewBox() verCrossViewBox.sigRangeChangedManually.disconnect(self.yAxisRangeCti.setAutoRangeOff) horCrossViewBox = self.pgImagePlot2d.horCrossPlotItem.getViewBox() horCrossViewBox.sigRangeChangedManually.disconnect(self.xAxisRangeCti.setAutoRangeOff) self.pgImagePlot2d.verCrossPlotItem.sigResetAxis.disconnect(self.setVerCrossPlotAutoRangeOn) self.pgImagePlot2d.horCrossPlotItem.sigResetAxis.disconnect(self.setHorCrossPlotAutoRangeOn) self.pgImagePlot2d.imagePlotItem.sigResetAxis.disconnect(self.setImagePlotAutoRangeOn)
[ "def", "_closeResources", "(", "self", ")", ":", "verCrossViewBox", "=", "self", ".", "pgImagePlot2d", ".", "verCrossPlotItem", ".", "getViewBox", "(", ")", "verCrossViewBox", ".", "sigRangeChangedManually", ".", "disconnect", "(", "self", ".", "yAxisRangeCti", ".", "setAutoRangeOff", ")", "horCrossViewBox", "=", "self", ".", "pgImagePlot2d", ".", "horCrossPlotItem", ".", "getViewBox", "(", ")", "horCrossViewBox", ".", "sigRangeChangedManually", ".", "disconnect", "(", "self", ".", "xAxisRangeCti", ".", "setAutoRangeOff", ")", "self", ".", "pgImagePlot2d", ".", "verCrossPlotItem", ".", "sigResetAxis", ".", "disconnect", "(", "self", ".", "setVerCrossPlotAutoRangeOn", ")", "self", ".", "pgImagePlot2d", ".", "horCrossPlotItem", ".", "sigResetAxis", ".", "disconnect", "(", "self", ".", "setHorCrossPlotAutoRangeOn", ")", "self", ".", "pgImagePlot2d", ".", "imagePlotItem", ".", "sigResetAxis", ".", "disconnect", "(", "self", ".", "setImagePlotAutoRangeOn", ")" ]
https://github.com/titusjan/argos/blob/5a9c31a8a9a2ca825bbf821aa1e685740e3682d7/argos/inspector/pgplugins/old_imageplot2d.py#L227-L238
1040003585/WebScrapingWithPython
a770fa5b03894076c8c9539b1ffff34424ffc016
portia_examle/lib/python2.7/site-packages/setuptools/dist.py
python
check_requirements
(dist, attr, value)
Verify that install_requires is a valid requirements list
Verify that install_requires is a valid requirements list
[ "Verify", "that", "install_requires", "is", "a", "valid", "requirements", "list" ]
def check_requirements(dist, attr, value): """Verify that install_requires is a valid requirements list""" try: list(pkg_resources.parse_requirements(value)) except (TypeError, ValueError) as error: tmpl = ( "{attr!r} must be a string or list of strings " "containing valid project/version requirement specifiers; {error}" ) raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
[ "def", "check_requirements", "(", "dist", ",", "attr", ",", "value", ")", ":", "try", ":", "list", "(", "pkg_resources", ".", "parse_requirements", "(", "value", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "error", ":", "tmpl", "=", "(", "\"{attr!r} must be a string or list of strings \"", "\"containing valid project/version requirement specifiers; {error}\"", ")", "raise", "DistutilsSetupError", "(", "tmpl", ".", "format", "(", "attr", "=", "attr", ",", "error", "=", "error", ")", ")" ]
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/portia_examle/lib/python2.7/site-packages/setuptools/dist.py#L148-L157
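The validation it delegates to can be exercised directly; a small sketch using the same pkg_resources call and the same exception types the record catches:

import pkg_resources

def is_valid_requirements(value):
    try:
        list(pkg_resources.parse_requirements(value))
        return True
    except (TypeError, ValueError):
        return False

print(is_valid_requirements(['requests>=2.0', 'six']))  # True
print(is_valid_requirements(['requests >=']))           # False: dangling specifier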
tf-encrypted/tf-encrypted
8b7cfb32c426e9a6f56769a1b47626bd1be03a66
tf_encrypted/protocol/pond/pond.py
python
Pond.add
(self, x, y)
return self.dispatch("add", x, y)
add(x, y) -> PondTensor Adds two tensors `x` and `y`. :param PondTensor x: The first operand. :param PondTensor y: The second operand.
add(x, y) -> PondTensor
[ "add", "(", "x", "y", ")", "-", ">", "PondTensor" ]
def add(self, x, y): """ add(x, y) -> PondTensor Adds two tensors `x` and `y`. :param PondTensor x: The first operand. :param PondTensor y: The second operand. """ x, y = self.lift(x, y) return self.dispatch("add", x, y)
[ "def", "add", "(", "self", ",", "x", ",", "y", ")", ":", "x", ",", "y", "=", "self", ".", "lift", "(", "x", ",", "y", ")", "return", "self", ".", "dispatch", "(", "\"add\"", ",", "x", ",", "y", ")" ]
https://github.com/tf-encrypted/tf-encrypted/blob/8b7cfb32c426e9a6f56769a1b47626bd1be03a66/tf_encrypted/protocol/pond/pond.py#L862-L872
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/client/session.py
python
push_session
(document, session_id=None, url='default', io_loop=None)
return session
Create a session by pushing the given document to the server, overwriting any existing server-side document. ``session.document`` in the returned session will be your supplied document. While the connection to the server is open, changes made on the server side will be applied to this document, and changes made on the client side will be synced to the server. In a production scenario, the ``session_id`` should be unique for each browser tab, which keeps users from stomping on each other. It's neither scalable nor secure to use predictable session IDs or to share session IDs across users. For a notebook running on a single machine, ``session_id`` could be something human-readable such as ``"default"`` for convenience. If you allow ``push_session()`` to generate a unique ``session_id``, you can obtain the generated ID with the ``id`` property on the returned ``ClientSession``. Args: document : (bokeh.document.Document) The document to be pushed and set as session.document session_id : (string, optional) The name of the session, None to autogenerate a random one (default: None) url : (str, optional): The URL to a Bokeh application on a Bokeh server can also be `"default"` which will connect to the default app URL io_loop : (tornado.ioloop.IOLoop, optional) The IOLoop to use for the websocket Returns: ClientSession A new ClientSession connected to the server
Create a session by pushing the given document to the server, overwriting any existing server-side document.
[ "Create", "a", "session", "by", "pushing", "the", "given", "document", "to", "the", "server", "overwriting", "any", "existing", "server", "-", "side", "document", "." ]
def push_session(document, session_id=None, url='default', io_loop=None): ''' Create a session by pushing the given document to the server, overwriting any existing server-side document. ``session.document`` in the returned session will be your supplied document. While the connection to the server is open, changes made on the server side will be applied to this document, and changes made on the client side will be synced to the server. In a production scenario, the ``session_id`` should be unique for each browser tab, which keeps users from stomping on each other. It's neither scalable nor secure to use predictable session IDs or to share session IDs across users. For a notebook running on a single machine, ``session_id`` could be something human-readable such as ``"default"`` for convenience. If you allow ``push_session()`` to generate a unique ``session_id``, you can obtain the generated ID with the ``id`` property on the returned ``ClientSession``. Args: document : (bokeh.document.Document) The document to be pushed and set as session.document session_id : (string, optional) The name of the session, None to autogenerate a random one (default: None) url : (str, optional): The URL to a Bokeh application on a Bokeh server can also be `"default"` which will connect to the default app URL io_loop : (tornado.ioloop.IOLoop, optional) The IOLoop to use for the websocket Returns: ClientSession A new ClientSession connected to the server ''' coords = _SessionCoordinates(session_id=session_id, url=url) session = ClientSession(session_id=coords.session_id, websocket_url=websocket_url_for_server_url(coords.url), io_loop=io_loop) session.push(document) return session
[ "def", "push_session", "(", "document", ",", "session_id", "=", "None", ",", "url", "=", "'default'", ",", "io_loop", "=", "None", ")", ":", "coords", "=", "_SessionCoordinates", "(", "session_id", "=", "session_id", ",", "url", "=", "url", ")", "session", "=", "ClientSession", "(", "session_id", "=", "coords", ".", "session_id", ",", "websocket_url", "=", "websocket_url_for_server_url", "(", "coords", ".", "url", ")", ",", "io_loop", "=", "io_loop", ")", "session", ".", "push", "(", "document", ")", "return", "session" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/client/session.py#L127-L169
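Typical client-side usage from the Bokeh 1.x era (a sketch that assumes a Bokeh server is already running at the default app URL; without one the push fails to connect):

from bokeh.client import push_session
from bokeh.document import Document
from bokeh.plotting import figure

doc = Document()
fig = figure(title="demo")
fig.line([1, 2, 3], [4, 6, 5])
doc.add_root(fig)

# overwrites any server-side state stored under this session id
session = push_session(doc, session_id="demo-tab")
print(session.id)  # "demo-tab"; omit session_id to get a generated unique one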
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py
python
LegacySpecifier._compare_less_than_equal
(self, prospective, spec)
return prospective <= self._coerce_version(spec)
[]
def _compare_less_than_equal(self, prospective, spec): return prospective <= self._coerce_version(spec)
[ "def", "_compare_less_than_equal", "(", "self", ",", "prospective", ",", "spec", ")", ":", "return", "prospective", "<=", "self", ".", "_coerce_version", "(", "spec", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py#L253-L254
ducksboard/libsaas
615981a3336f65be9d51ae95a48aed9ad3bd1c3c
libsaas/services/trello/notifications.py
python
Notification.card
(self)
return Card(self)
Returns a single card
Returns a single card
[ "Returns", "a", "single", "card" ]
def card(self): """ Returns a single card """ return Card(self)
[ "def", "card", "(", "self", ")", ":", "return", "Card", "(", "self", ")" ]
https://github.com/ducksboard/libsaas/blob/615981a3336f65be9d51ae95a48aed9ad3bd1c3c/libsaas/services/trello/notifications.py#L59-L63
oddt/oddt
8cf555820d97a692ade81c101ebe10e28bcb3722
oddt/fingerprints.py
python
similarity_SPLIF
(reference, query, rmsd_cutoff=1.)
Calculates similarity between structural interaction fingerprints, based on doi:http://pubs.acs.org/doi/abs/10.1021/ci500319f. Parameters ---------- reference, query: numpy.array SPLIFs, which are compared in order to determine similarity. rmsd_cutoff : float (default = 1.) Specific threshold below which bits are considered as fully matching. Returns ------- SimilarityScore : float Similarity between given fingerprints.
Calculates similarity between structural interaction fingerprints, based on doi:http://pubs.acs.org/doi/abs/10.1021/ci500319f.
[ "Calculates", "similarity", "between", "structural", "interaction", "fingerprints", "based", "on", "doi", ":", "http", ":", "//", "pubs", ".", "acs", ".", "org", "/", "doi", "/", "abs", "/", "10", ".", "1021", "/", "ci500319f", "." ]
def similarity_SPLIF(reference, query, rmsd_cutoff=1.): """Calculates similarity between structural interaction fingerprints, based on doi:http://pubs.acs.org/doi/abs/10.1021/ci500319f. Parameters ---------- reference, query: numpy.array SPLIFs, which are compared in order to determine similarity. rmsd_cutoff : float (default = 1.) Specific threshold below which bits are considered as fully matching. Returns ------- SimilarityScore : float Similarity between given fingerprints. """ # intersection of reference and query hashed atoms index = np.intersect1d(reference['hash'], query['hash']) ref_intersection = reference[np.where(np.in1d(reference['hash'], index))] ref_group_intersection = np.split(ref_intersection, np.searchsorted( ref_intersection['hash'], index[1:])) # reference query_intersection = query[np.where(np.in1d(query['hash'], index))] query_group_intersection = np.split(query_intersection, np.searchsorted( query_intersection['hash'], index[1:])) # query numla = 0 # number of unique matching ligand atoms nula = 0 # number of unique ligand atoms numpa = 0 # number of unique matching protein atoms nupa = 0 # number of unique protein atoms def combinatorial_rmsd(reference, query): """Calculates root mean square deviation between groups of points. It takes two matrices of shapes e.g (2, 5, 3) and (4, 5, 3) -> (2, 4).""" return np.sqrt(np.nansum(np.mean( (reference[:, np.newaxis, ...] - query)**2, axis=-1), axis=-1)) for pair in range(len(ref_group_intersection)): # reference protein-ligand pair ref_pair = ref_group_intersection[pair] # query protein-ligand pair query_pair = query_group_intersection[pair] ref_ligand = ref_pair['ligand_coords'] ref_protein = ref_pair['protein_coords'] query_ligand = query_pair['ligand_coords'] query_protein = query_pair['protein_coords'] rmsd_ligand = combinatorial_rmsd(ref_ligand, query_ligand) rmsd_protein = combinatorial_rmsd(ref_protein, query_protein) passing_ligand = rmsd_ligand < rmsd_cutoff passing_protein = rmsd_protein < rmsd_cutoff num_matching_ligand = min(passing_ligand.any(axis=0).sum(), passing_ligand.any(axis=1).sum()) num_matching_protein = min(passing_protein.any(axis=0).sum(), passing_protein.any(axis=1).sum()) num_all_ligand = len(ref_ligand) + len(query_ligand) - num_matching_ligand num_all_protein = len(ref_protein) + len(query_protein) - num_matching_protein numla += num_matching_ligand numpa += num_matching_protein nula += num_all_ligand nupa += num_all_protein if nula == 0 or nupa == 0: return 0. else: return np.sqrt((numla / nula) * (numpa / nupa))
[ "def", "similarity_SPLIF", "(", "reference", ",", "query", ",", "rmsd_cutoff", "=", "1.", ")", ":", "# intersection of reference and query hashed atoms", "index", "=", "np", ".", "intersect1d", "(", "reference", "[", "'hash'", "]", ",", "query", "[", "'hash'", "]", ")", "ref_intersection", "=", "reference", "[", "np", ".", "where", "(", "np", ".", "in1d", "(", "reference", "[", "'hash'", "]", ",", "index", ")", ")", "]", "ref_group_intersection", "=", "np", ".", "split", "(", "ref_intersection", ",", "np", ".", "searchsorted", "(", "ref_intersection", "[", "'hash'", "]", ",", "index", "[", "1", ":", "]", ")", ")", "# reference", "query_intersection", "=", "query", "[", "np", ".", "where", "(", "np", ".", "in1d", "(", "query", "[", "'hash'", "]", ",", "index", ")", ")", "]", "query_group_intersection", "=", "np", ".", "split", "(", "query_intersection", ",", "np", ".", "searchsorted", "(", "query_intersection", "[", "'hash'", "]", ",", "index", "[", "1", ":", "]", ")", ")", "# query", "numla", "=", "0", "# number of unique matching ligand atoms", "nula", "=", "0", "# number of unique ligand atoms", "numpa", "=", "0", "# number of unique matching protein atoms", "nupa", "=", "0", "# number of unique protein atoms", "def", "combinatorial_rmsd", "(", "reference", ",", "query", ")", ":", "\"\"\"Calculates root mean square deviation between groups of points. It\n takes two matrices of shapes e.g (2, 5, 3) and (4, 5, 3) -> (2, 4).\"\"\"", "return", "np", ".", "sqrt", "(", "np", ".", "nansum", "(", "np", ".", "mean", "(", "(", "reference", "[", ":", ",", "np", ".", "newaxis", ",", "...", "]", "-", "query", ")", "**", "2", ",", "axis", "=", "-", "1", ")", ",", "axis", "=", "-", "1", ")", ")", "for", "pair", "in", "range", "(", "len", "(", "ref_group_intersection", ")", ")", ":", "# reference protein-ligand pair", "ref_pair", "=", "ref_group_intersection", "[", "pair", "]", "# query protein-ligand pair", "query_pair", "=", "query_group_intersection", "[", "pair", "]", "ref_ligand", "=", "ref_pair", "[", "'ligand_coords'", "]", "ref_protein", "=", "ref_pair", "[", "'protein_coords'", "]", "query_ligand", "=", "query_pair", "[", "'ligand_coords'", "]", "query_protein", "=", "query_pair", "[", "'protein_coords'", "]", "rmsd_ligand", "=", "combinatorial_rmsd", "(", "ref_ligand", ",", "query_ligand", ")", "rmsd_protein", "=", "combinatorial_rmsd", "(", "ref_protein", ",", "query_protein", ")", "passing_ligand", "=", "rmsd_ligand", "<", "rmsd_cutoff", "passing_protein", "=", "rmsd_protein", "<", "rmsd_cutoff", "num_matching_ligand", "=", "min", "(", "passing_ligand", ".", "any", "(", "axis", "=", "0", ")", ".", "sum", "(", ")", ",", "passing_ligand", ".", "any", "(", "axis", "=", "1", ")", ".", "sum", "(", ")", ")", "num_matching_protein", "=", "min", "(", "passing_protein", ".", "any", "(", "axis", "=", "0", ")", ".", "sum", "(", ")", ",", "passing_protein", ".", "any", "(", "axis", "=", "1", ")", ".", "sum", "(", ")", ")", "num_all_ligand", "=", "len", "(", "ref_ligand", ")", "+", "len", "(", "query_ligand", ")", "-", "num_matching_ligand", "num_all_protein", "=", "len", "(", "ref_protein", ")", "+", "len", "(", "query_protein", ")", "-", "num_matching_protein", "numla", "+=", "num_matching_ligand", "numpa", "+=", "num_matching_protein", "nula", "+=", "num_all_ligand", "nupa", "+=", "num_all_protein", "if", "nula", "==", "0", "or", "nupa", "==", "0", ":", "return", "0.", "else", ":", "return", "np", ".", "sqrt", "(", "(", "numla", "/", "nula", ")", "*", "(", "numpa", "/", "nupa", ")", ")" ]
https://github.com/oddt/oddt/blob/8cf555820d97a692ade81c101ebe10e28bcb3722/oddt/fingerprints.py#L702-L766
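The nested combinatorial_rmsd is the broadcasting core of the record and can be sanity-checked on its own with random coordinates (an illustrative demo; note the deviation is summed over atoms exactly as defined above):

import numpy as np

def combinatorial_rmsd(reference, query):
    """Pairwise deviation between groups of points, as defined in the record:
    shapes (2, 5, 3) and (4, 5, 3) -> (2, 4)."""
    return np.sqrt(np.nansum(np.mean(
        (reference[:, np.newaxis, ...] - query) ** 2, axis=-1), axis=-1))

rng = np.random.default_rng(1)
ref = rng.random((2, 5, 3))   # 2 groups of 5 atoms in 3-D
qry = rng.random((4, 5, 3))   # 4 groups of 5 atoms in 3-D
print(combinatorial_rmsd(ref, qry).shape)                    # (2, 4)
print(np.isclose(combinatorial_rmsd(ref, ref)[0, 0], 0.0))   # True: identical groups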
zachwill/flask-engine
7c8ad4bfe36382a8c9286d873ec7b785715832a4
libs/flask/helpers.py
python
safe_join
(directory, filename)
return os.path.join(directory, filename)
Safely join `directory` and `filename`. Example usage:: @app.route('/wiki/<path:filename>') def wiki_page(filename): filename = safe_join(app.config['WIKI_FOLDER'], filename) with open(filename, 'rb') as fd: content = fd.read() # Read and process the file content... :param directory: the base directory. :param filename: the untrusted filename relative to that directory. :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path would fall out of `directory`.
Safely join `directory` and `filename`.
[ "Safely", "join", "directory", "and", "filename", "." ]
def safe_join(directory, filename): """Safely join `directory` and `filename`. Example usage:: @app.route('/wiki/<path:filename>') def wiki_page(filename): filename = safe_join(app.config['WIKI_FOLDER'], filename) with open(filename, 'rb') as fd: content = fd.read() # Read and process the file content... :param directory: the base directory. :param filename: the untrusted filename relative to that directory. :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path would fall out of `directory`. """ filename = posixpath.normpath(filename) for sep in _os_alt_seps: if sep in filename: raise NotFound() if os.path.isabs(filename) or filename.startswith('../'): raise NotFound() return os.path.join(directory, filename)
[ "def", "safe_join", "(", "directory", ",", "filename", ")", ":", "filename", "=", "posixpath", ".", "normpath", "(", "filename", ")", "for", "sep", "in", "_os_alt_seps", ":", "if", "sep", "in", "filename", ":", "raise", "NotFound", "(", ")", "if", "os", ".", "path", ".", "isabs", "(", "filename", ")", "or", "filename", ".", "startswith", "(", "'../'", ")", ":", "raise", "NotFound", "(", ")", "return", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")" ]
https://github.com/zachwill/flask-engine/blob/7c8ad4bfe36382a8c9286d873ec7b785715832a4/libs/flask/helpers.py#L607-L629
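The rejection logic can be tried in isolation; this standalone predicate mirrors safe_join's checks but returns a bool instead of raising NotFound (an illustrative sketch, not Flask's API):

import os
import posixpath

_os_alt_seps = [sep for sep in (os.path.sep, os.path.altsep)
                if sep not in (None, '/')]

def is_safe_filename(filename):
    filename = posixpath.normpath(filename)
    if any(sep in filename for sep in _os_alt_seps):
        return False
    return not (os.path.isabs(filename) or filename.startswith('../'))

print(is_safe_filename('pages/intro.txt'))   # True
print(is_safe_filename('a/../../b'))         # False: normalizes to '../b'
print(is_safe_filename('/etc/passwd'))       # False: absolute path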
a1600012888/YOPO-You-Only-Propagate-Once
b8ae668be829a0ca50647ae21676a932a9634365
experiments/CIFAR10-TRADES/pre-res18.TRADES-YOPO-2-5/network.py
python
PreActResNet.forward
(self, x)
return x
[]
def forward(self, x): x = self.layer_one(x) self.layer_one_out = x self.layer_one_out.requires_grad_() self.layer_one_out.retain_grad() x = self.layer_one_out for layer in self.other_layers: x = layer(x) return x
[ "def", "forward", "(", "self", ",", "x", ")", ":", "x", "=", "self", ".", "layer_one", "(", "x", ")", "self", ".", "layer_one_out", "=", "x", "self", ".", "layer_one_out", ".", "requires_grad_", "(", ")", "self", ".", "layer_one_out", ".", "retain_grad", "(", ")", "x", "=", "self", ".", "layer_one_out", "for", "layer", "in", "self", ".", "other_layers", ":", "x", "=", "layer", "(", "x", ")", "return", "x" ]
https://github.com/a1600012888/YOPO-You-Only-Propagate-Once/blob/b8ae668be829a0ca50647ae21676a932a9634365/experiments/CIFAR10-TRADES/pre-res18.TRADES-YOPO-2-5/network.py#L68-L79
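The point of stashing layer_one_out and calling retain_grad() is that YOPO-style training reads the gradient at that activation after backward; a tiny stand-in module (not the PreActResNet above, and with the redundant requires_grad_() call dropped since the activation already requires grad in training) shows the mechanism:

import torch
import torch.nn as nn

class TinyNet(nn.Module):
    def __init__(self):
        super(TinyNet, self).__init__()
        self.layer_one = nn.Linear(4, 8)
        self.rest = nn.Sequential(nn.ReLU(), nn.Linear(8, 2))

    def forward(self, x):
        x = self.layer_one(x)
        self.layer_one_out = x             # stash the first-layer activation
        self.layer_one_out.retain_grad()   # keep .grad on this non-leaf tensor
        return self.rest(self.layer_one_out)

net = TinyNet()
loss = net(torch.randn(3, 4)).sum()
loss.backward()
print(net.layer_one_out.grad.shape)  # torch.Size([3, 8]): gradient at the stored activation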
deepmind/dm_control
806a10e896e7c887635328bfa8352604ad0fedae
dm_control/utils/io.py
python
GetResourceFilename
(name, mode='rb')
return name
[]
def GetResourceFilename(name, mode='rb'): del mode # Unused. return name
[ "def", "GetResourceFilename", "(", "name", ",", "mode", "=", "'rb'", ")", ":", "del", "mode", "# Unused.", "return", "name" ]
https://github.com/deepmind/dm_control/blob/806a10e896e7c887635328bfa8352604ad0fedae/dm_control/utils/io.py#L26-L28
coin-or/rbfopt
3ba5320a23f04ac3729eff7b55527f2f1e6f9fdd
src/rbfopt/examples/rbfopt_black_box_example.py
python
RbfoptBlackBox.get_var_type
(self)
return self.var_type
Return the type of each variable. Returns ------- 1D numpy.ndarray[char] An array of length equal to dimension, specifying the type of each variable. Possible types are 'R' for real (continuous) variables, 'I' for integer (discrete) variables, 'C' for categorical (discrete, unordered). Bounds for categorical variables are interpreted the same way as for integer variables, but categorical variables are handled differently by the optimization algorithm; e.g., a categorical variable with bounds [2, 4] can take the value 2, 3 or 4.
Return the type of each variable. Returns ------- 1D numpy.ndarray[char] An array of length equal to dimension, specifying the type of each variable. Possible types are 'R' for real (continuous) variables, 'I' for integer (discrete) variables, 'C' for categorical (discrete, unordered). Bounds for categorical variables are interpreted the same way as for integer variables, but categorical variables are handled differently by the optimization algorithm; e.g., a categorical variable with bounds [2, 4] can take the value 2, 3 or 4.
[ "Return", "the", "type", "of", "each", "variable", ".", "Returns", "-------", "1D", "numpy", ".", "ndarray", "[", "char", "]", "An", "array", "of", "length", "equal", "to", "dimension", "specifying", "the", "type", "of", "each", "variable", ".", "Possible", "types", "are", "R", "for", "real", "(", "continuous", ")", "variables", "I", "for", "integer", "(", "discrete", ")", "variables", "C", "for", "categorical", "(", "discrete", "unordered", ")", ".", "Bounds", "for", "categorical", "variables", "are", "interpreted", "the", "same", "way", "as", "for", "integer", "variables", "but", "categorical", "variables", "are", "handled", "differently", "by", "the", "optimization", "algorithm", ";", "e", ".", "g", ".", "a", "categorical", "variable", "with", "bounds", "[", "2", "4", "]", "can", "take", "the", "value", "2", "3", "or", "4", "." ]
def get_var_type(self): """Return the type of each variable. Returns ------- 1D numpy.ndarray[char] An array of length equal to dimension, specifying the type of each variable. Possible types are 'R' for real (continuous) variables, 'I' for integer (discrete) variables, 'C' for categorical (discrete, unordered). Bounds for categorical variables are interpreted the same way as for integer variables, but categorical variables are handled differently by the optimization algorithm; e.g., a categorical variable with bounds [2, 4] can take the value 2, 3 or 4. """ return self.var_type
[ "def", "get_var_type", "(", "self", ")", ":", "return", "self", ".", "var_type" ]
https://github.com/coin-or/rbfopt/blob/3ba5320a23f04ac3729eff7b55527f2f1e6f9fdd/src/rbfopt/examples/rbfopt_black_box_example.py#L123-L140
SanPen/GridCal
d3f4566d2d72c11c7e910c9d162538ef0e60df31
src/GridCal/Gui/GridEditorWidget/transformer2w_graphics.py
python
TransformerGraphicItem.__init__
(self, fromPort: TerminalItem, toPort: TerminalItem, diagramScene, width=5, branch: Transformer2W = None)
:param fromPort: :param toPort: :param diagramScene: :param width: :param branch:
[]
def __init__(self, fromPort: TerminalItem, toPort: TerminalItem, diagramScene, width=5, branch: Transformer2W = None): """ :param fromPort: :param toPort: :param diagramScene: :param width: :param branch: """ QGraphicsLineItem.__init__(self, None) self.api_object = branch if self.api_object is not None: if self.api_object.active: self.style = ACTIVE['style'] self.color = ACTIVE['color'] else: self.style = DEACTIVATED['style'] self.color = DEACTIVATED['color'] else: self.style = OTHER['style'] self.color = OTHER['color'] self.width = width self.pen_width = width self.setPen(QPen(self.color, self.width, self.style)) self.setFlag(self.ItemIsSelectable, True) self.setCursor(QCursor(Qt.PointingHandCursor)) self.pos1 = None self.pos2 = None self.fromPort = None self.toPort = None self.diagramScene = diagramScene if fromPort: self.setFromPort(fromPort) if toPort: self.setToPort(toPort) # add transformer circles self.symbol_type = BranchType.Line self.symbol = None self.c0 = None self.c1 = None self.c2 = None if self.api_object is not None: self.update_symbol() # add the line and its possible children to the scene self.diagramScene.addItem(self) if fromPort and toPort: self.redraw()
[ "def", "__init__", "(", "self", ",", "fromPort", ":", "TerminalItem", ",", "toPort", ":", "TerminalItem", ",", "diagramScene", ",", "width", "=", "5", ",", "branch", ":", "Transformer2W", "=", "None", ")", ":", "QGraphicsLineItem", ".", "__init__", "(", "self", ",", "None", ")", "self", ".", "api_object", "=", "branch", "if", "self", ".", "api_object", "is", "not", "None", ":", "if", "self", ".", "api_object", ".", "active", ":", "self", ".", "style", "=", "ACTIVE", "[", "'style'", "]", "self", ".", "color", "=", "ACTIVE", "[", "'color'", "]", "else", ":", "self", ".", "style", "=", "DEACTIVATED", "[", "'style'", "]", "self", ".", "color", "=", "DEACTIVATED", "[", "'color'", "]", "else", ":", "self", ".", "style", "=", "OTHER", "[", "'style'", "]", "self", ".", "color", "=", "OTHER", "[", "'color'", "]", "self", ".", "width", "=", "width", "self", ".", "pen_width", "=", "width", "self", ".", "setPen", "(", "QPen", "(", "self", ".", "color", ",", "self", ".", "width", ",", "self", ".", "style", ")", ")", "self", ".", "setFlag", "(", "self", ".", "ItemIsSelectable", ",", "True", ")", "self", ".", "setCursor", "(", "QCursor", "(", "Qt", ".", "PointingHandCursor", ")", ")", "self", ".", "pos1", "=", "None", "self", ".", "pos2", "=", "None", "self", ".", "fromPort", "=", "None", "self", ".", "toPort", "=", "None", "self", ".", "diagramScene", "=", "diagramScene", "if", "fromPort", ":", "self", ".", "setFromPort", "(", "fromPort", ")", "if", "toPort", ":", "self", ".", "setToPort", "(", "toPort", ")", "# add transformer circles", "self", ".", "symbol_type", "=", "BranchType", ".", "Line", "self", ".", "symbol", "=", "None", "self", ".", "c0", "=", "None", "self", ".", "c1", "=", "None", "self", ".", "c2", "=", "None", "if", "self", ".", "api_object", "is", "not", "None", ":", "self", ".", "update_symbol", "(", ")", "# add the line and it possible children to the scene", "self", ".", "diagramScene", ".", "addItem", "(", "self", ")", "if", "fromPort", "and", "toPort", ":", "self", ".", "redraw", "(", ")" ]
https://github.com/SanPen/GridCal/blob/d3f4566d2d72c11c7e910c9d162538ef0e60df31/src/GridCal/Gui/GridEditorWidget/transformer2w_graphics.py#L299-L353
nosmokingbandit/watcher
dadacd21a5790ee609058a98a17fcc8954d24439
lib/sqlalchemy/sql/sqltypes.py
python
TIMESTAMP.__init__
(self, timezone=False)
Construct a new :class:`.TIMESTAMP`. :param timezone: boolean. Indicates that the TIMESTAMP type should enable timezone support, if available on the target database. On a per-dialect basis, this is similar to "TIMESTAMP WITH TIMEZONE". If the target database does not support timezones, this flag is ignored.
Construct a new :class:`.TIMESTAMP`.
[ "Construct", "a", "new", ":", "class", ":", ".", "TIMESTAMP", "." ]
def __init__(self, timezone=False): """Construct a new :class:`.TIMESTAMP`. :param timezone: boolean. Indicates that the TIMESTAMP type should enable timezone support, if available on the target database. On a per-dialect basis, this is similar to "TIMESTAMP WITH TIMEZONE". If the target database does not support timezones, this flag is ignored. """ super(TIMESTAMP, self).__init__(timezone=timezone)
[ "def", "__init__", "(", "self", ",", "timezone", "=", "False", ")", ":", "super", "(", "TIMESTAMP", ",", "self", ")", ".", "__init__", "(", "timezone", "=", "timezone", ")" ]
https://github.com/nosmokingbandit/watcher/blob/dadacd21a5790ee609058a98a17fcc8954d24439/lib/sqlalchemy/sql/sqltypes.py#L2257-L2268
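A short usage sketch with SQLAlchemy Core (the table and column names are illustrative):

from sqlalchemy import Column, MetaData, Table, TIMESTAMP

metadata = MetaData()
events = Table(
    "events", metadata,
    Column("created_at", TIMESTAMP(timezone=True)),  # TIMESTAMP WITH TIME ZONE where supported
)
print(events.c.created_at.type.timezone)  # True; dialects lacking timezones ignore the flag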
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/xiaomi_miio/switch.py
python
XiaomiGenericCoordinatedSwitch.async_set_child_lock_off
(self)
return await self._try_command( "Turning the child lock of the miio device off failed.", self._device.set_child_lock, False, )
Turn the child lock off.
Turn the child lock off.
[ "Turn", "the", "child", "lock", "off", "." ]
async def async_set_child_lock_off(self) -> bool: """Turn the child lock off.""" return await self._try_command( "Turning the child lock of the miio device off failed.", self._device.set_child_lock, False, )
[ "async", "def", "async_set_child_lock_off", "(", "self", ")", "->", "bool", ":", "return", "await", "self", ".", "_try_command", "(", "\"Turning the child lock of the miio device off failed.\"", ",", "self", ".", "_device", ".", "set_child_lock", ",", "False", ",", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/xiaomi_miio/switch.py#L522-L528
PlasmaPy/PlasmaPy
78d63e341216475ce3318e1409296480407c9019
plasmapy/particles/particle_collections.py
python
ParticleList._get_particle_attribute
(self, attr, unit=None, default=None)
return values
Get the values of a particular attribute from all of the particles. If a ``unit`` is provided, then this function will return a `~astropy.units.Quantity` array with that unit.
Get the values of a particular attribute from all of the particles.
[ "Get", "the", "values", "of", "a", "particular", "attribute", "from", "all", "of", "the", "particles", "." ]
def _get_particle_attribute(self, attr, unit=None, default=None): """ Get the values of a particular attribute from all of the particles. If a ``unit`` is provided, then this function will return a `~astropy.units.Quantity` array with that unit. """ values = [getattr(particle, attr, default) for particle in self.data] if unit: values = u.Quantity(values) return values
[ "def", "_get_particle_attribute", "(", "self", ",", "attr", ",", "unit", "=", "None", ",", "default", "=", "None", ")", ":", "values", "=", "[", "getattr", "(", "particle", ",", "attr", ",", "default", ")", "for", "particle", "in", "self", ".", "data", "]", "if", "unit", ":", "values", "=", "u", ".", "Quantity", "(", "values", ")", "return", "values" ]
https://github.com/PlasmaPy/PlasmaPy/blob/78d63e341216475ce3318e1409296480407c9019/plasmapy/particles/particle_collections.py#L188-L198
biolab/orange2
db40a9449cb45b507d63dcd5739b223f9cffb8e6
Orange/utils/addons.py
python
open_archive
(path, mode="r")
return archive
Return an open archive file object (zipfile.ZipFile or tarfile.TarFile).
Return an open archive file object (zipfile.ZipFile or tarfile.TarFile).
[ "Return", "an", "open", "archive", "file", "object", "(", "zipfile", ".", "ZipFile", "or", "tarfile", ".", "TarFile", ")", "." ]
def open_archive(path, mode="r"): """ Return an open archive file object (zipfile.ZipFile or tarfile.TarFile). """ _, ext = os.path.splitext(path) if ext == ".zip": # TODO: should it also open .egg, ... archive = zipfile.ZipFile(path, mode) elif ext in (".tar", ".gz", ".bz2", ".tgz", ".tbz2", ".tb2"): archive = tarfile.open(path, mode) else: # previously fell through and raised UnboundLocalError on 'archive' raise ValueError("unknown archive type: %r" % ext) return archive
[ "def", "open_archive", "(", "path", ",", "mode", "=", "\"r\"", ")", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "if", "ext", "==", "\".zip\"", ":", "# TODO: should it also open .egg, ...", "archive", "=", "zipfile", ".", "ZipFile", "(", "path", ",", "mode", ")", "elif", "ext", "in", "(", "\".tar\"", ",", "\".gz\"", ",", "\".bz2\"", ",", "\".tgz\"", ",", "\".tbz2\"", ",", "\".tb2\"", ")", ":", "archive", "=", "tarfile", ".", "open", "(", "path", ",", "mode", ")", "return", "archive" ]
https://github.com/biolab/orange2/blob/db40a9449cb45b507d63dcd5739b223f9cffb8e6/Orange/utils/addons.py#L231-L243
golismero/golismero
7d605b937e241f51c1ca4f47b20f755eeefb9d76
tools/sqlmap/thirdparty/odict/odict.py
python
Values.__setitem__
(self, index, value)
Set the value at position i to value. You can only do slice assignment to values if you supply a sequence of equal length to the slice you are replacing.
Set the value at position i to value.
[ "Set", "the", "value", "at", "position", "i", "to", "value", "." ]
def __setitem__(self, index, value): """ Set the value at position i to value. You can only do slice assignment to values if you supply a sequence of equal length to the slice you are replacing. """ if isinstance(index, types.SliceType): keys = self._main._sequence[index] if len(keys) != len(value): raise ValueError('attempt to assign sequence of size %s ' 'to slice of size %s' % (len(value), len(keys))) # FIXME: efficiency? Would be better to calculate the indexes # directly from the slice object # NOTE: the new keys can collide with existing keys (or even # contain duplicates) - these will overwrite for key, val in zip(keys, value): self._main[key] = val else: self._main[self._main._sequence[index]] = value
[ "def", "__setitem__", "(", "self", ",", "index", ",", "value", ")", ":", "if", "isinstance", "(", "index", ",", "types", ".", "SliceType", ")", ":", "keys", "=", "self", ".", "_main", ".", "_sequence", "[", "index", "]", "if", "len", "(", "keys", ")", "!=", "len", "(", "value", ")", ":", "raise", "ValueError", "(", "'attempt to assign sequence of size %s '", "'to slice of size %s'", "%", "(", "len", "(", "name", ")", ",", "len", "(", "keys", ")", ")", ")", "# FIXME: efficiency? Would be better to calculate the indexes", "# directly from the slice object", "# NOTE: the new keys can collide with existing keys (or even", "# contain duplicates) - these will overwrite", "for", "key", ",", "val", "in", "zip", "(", "keys", ",", "value", ")", ":", "self", ".", "_main", "[", "key", "]", "=", "val", "else", ":", "self", ".", "_main", "[", "self", ".", "_main", ".", "_sequence", "[", "index", "]", "]", "=", "value" ]
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/tools/sqlmap/thirdparty/odict/odict.py#L1101-L1120
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/gae_proxy/server/lib/google/appengine/api/taskqueue/taskqueue.py
python
Queue.add_async
(self, task, transactional=False, rpc=None)
return self.__AddTasks(tasks, transactional, fill_function, multiple, rpc)
Asynchronously adds a Task or list of Tasks into this Queue. This function is identical to add() except that it returns an asynchronous object. You can call get_result() on the return value to block on the call. Args: task: A Task instance or a list of Task instances that will be added to the queue. transactional: If True, transactional Tasks will be added to the queue but cannot be run or leased until after the transaction succeeds. If the transaction fails then the Tasks will be removed from the queue (and therefore never run). If False, the added task(s) are available to run immediately; any enclosing transaction's success or failure is ignored. rpc: An optional UserRPC object. Returns: A UserRPC object; call get_result to complete the RPC and obtain the Task or list of Tasks that was supplied to this method. Successfully queued Tasks will have a valid queue name and task name after the call; such Task objects are marked as queued and cannot be added again. Note: Task objects returned from transactional adds are not notified or updated when the enclosing transaction succeeds or fails. Raises: BadTaskStateError: if the Task(s) has already been added to a queue. BadTransactionStateError: if the transactional argument is true but this call is being made outside of the context of a transaction. DuplicateTaskNameError: if a Task name is repeated in the request. InvalidTaskError: if both push and pull tasks exist in the task list. InvalidTaskNameError: if a Task name is provided but is not legal. TooManyTasksError: if task contains more than MAX_TASKS_PER_ADD tasks. TransactionalRequestTooLargeError: if transactional is True and the total size of the tasks and supporting request data exceeds MAX_TRANSACTIONAL_REQUEST_SIZE_BYTES.
Asynchronously adds a Task or list of Tasks into this Queue.
[ "Asynchronously", "adds", "a", "Task", "or", "list", "of", "Tasks", "into", "this", "Queue", "." ]
def add_async(self, task, transactional=False, rpc=None): """Asynchronously adds a Task or list of Tasks into this Queue. This function is identical to add() except that it returns an asynchronous object. You can call get_result() on the return value to block on the call. Args: task: A Task instance or a list of Task instances that will be added to the queue. transactional: If True, transactional Tasks will be added to the queue but cannot be run or leased until after the transaction succeeds. If the transaction fails then the Tasks will be removed from the queue (and therefore never run). If False, the added task(s) are available to run immediately; any enclosing transaction's success or failure is ignored. rpc: An optional UserRPC object. Returns: A UserRPC object; call get_result to complete the RPC and obtain the Task or list of Tasks that was supplied to this method. Successfully queued Tasks will have a valid queue name and task name after the call; such Task objects are marked as queued and cannot be added again. Note: Task objects returned from transactional adds are not notified or updated when the enclosing transaction succeeds or fails. Raises: BadTaskStateError: if the Task(s) has already been added to a queue. BadTransactionStateError: if the transactional argument is true but this call is being made outside of the context of a transaction. DuplicateTaskNameError: if a Task name is repeated in the request. InvalidTaskError: if both push and pull tasks exist in the task list. InvalidTaskNameError: if a Task name is provided but is not legal. TooManyTasksError: if task contains more than MAX_TASKS_PER_ADD tasks. TransactionalRequestTooLargeError: if transactional is True and the total size of the tasks and supporting request data exceeds MAX_TRANSACTIONAL_REQUEST_SIZE_BYTES. """ try: tasks = list(iter(task)) except TypeError: tasks = [task] multiple = False else: multiple = True has_push_task = False has_pull_task = False for task in tasks: if task.method == 'PULL': has_pull_task = True else: has_push_task = True if has_push_task and has_pull_task: raise InvalidTaskError( 'Can not add both push and pull tasks in a single call.') if has_push_task: fill_function = self.__FillAddPushTasksRequest else: fill_function = self.__FillAddPullTasksRequest return self.__AddTasks(tasks, transactional, fill_function, multiple, rpc)
[ "def", "add_async", "(", "self", ",", "task", ",", "transactional", "=", "False", ",", "rpc", "=", "None", ")", ":", "try", ":", "tasks", "=", "list", "(", "iter", "(", "task", ")", ")", "except", "TypeError", ":", "tasks", "=", "[", "task", "]", "multiple", "=", "False", "else", ":", "multiple", "=", "True", "has_push_task", "=", "False", "has_pull_task", "=", "False", "for", "task", "in", "tasks", ":", "if", "task", ".", "method", "==", "'PULL'", ":", "has_pull_task", "=", "True", "else", ":", "has_push_task", "=", "True", "if", "has_push_task", "and", "has_pull_task", ":", "raise", "InvalidTaskError", "(", "'Can not add both push and pull tasks in a single call.'", ")", "if", "has_push_task", ":", "fill_function", "=", "self", ".", "__FillAddPushTasksRequest", "else", ":", "fill_function", "=", "self", ".", "__FillAddPullTasksRequest", "return", "self", ".", "__AddTasks", "(", "tasks", ",", "transactional", ",", "fill_function", ",", "multiple", ",", "rpc", ")" ]
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/gae_proxy/server/lib/google/appengine/api/taskqueue/taskqueue.py#L1811-L1880
zvtvz/zvt
054bf8a3e7a049df7087c324fa87e8effbaf5bdc
src/zvt/samples/stock_traders.py
python
MyBullTrader.init_selectors
( self, entity_ids, entity_schema, exchanges, codes, start_timestamp, end_timestamp, adjust_type=None )
[]
def init_selectors( self, entity_ids, entity_schema, exchanges, codes, start_timestamp, end_timestamp, adjust_type=None ): myselector = TargetSelector( entity_ids=entity_ids, entity_schema=entity_schema, exchanges=exchanges, codes=codes, start_timestamp=start_timestamp, end_timestamp=end_timestamp, provider="joinquant", ) myselector.add_factor( BullFactor( entity_ids=entity_ids, entity_schema=entity_schema, exchanges=exchanges, codes=codes, start_timestamp=start_timestamp, end_timestamp=end_timestamp, adjust_type=adjust_type, ) ) self.selectors.append(myselector)
[ "def", "init_selectors", "(", "self", ",", "entity_ids", ",", "entity_schema", ",", "exchanges", ",", "codes", ",", "start_timestamp", ",", "end_timestamp", ",", "adjust_type", "=", "None", ")", ":", "myselector", "=", "TargetSelector", "(", "entity_ids", "=", "entity_ids", ",", "entity_schema", "=", "entity_schema", ",", "exchanges", "=", "exchanges", ",", "codes", "=", "codes", ",", "start_timestamp", "=", "start_timestamp", ",", "end_timestamp", "=", "end_timestamp", ",", "provider", "=", "\"joinquant\"", ",", ")", "myselector", ".", "add_factor", "(", "BullFactor", "(", "entity_ids", "=", "entity_ids", ",", "entity_schema", "=", "entity_schema", ",", "exchanges", "=", "exchanges", ",", "codes", "=", "codes", ",", "start_timestamp", "=", "start_timestamp", ",", "end_timestamp", "=", "end_timestamp", ",", "adjust_type", "=", "adjust_type", ",", ")", ")", "self", ".", "selectors", ".", "append", "(", "myselector", ")" ]
https://github.com/zvtvz/zvt/blob/054bf8a3e7a049df7087c324fa87e8effbaf5bdc/src/zvt/samples/stock_traders.py#L41-L66
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-35/fabmetheus_utilities/euclidean.py
python
getIncrementFromRank
( rank )
return float( powerOfTen * moduloMultipliers[ rankModulo ] )
Get the increment from the rank which is 0 at 1 and increases by three every power of ten.
Get the increment from the rank which is 0 at 1 and increases by three every power of ten.
[ "Get", "the", "increment", "from", "the", "rank", "which", "is", "0", "at", "1", "and", "increases", "by", "three", "every", "power", "of", "ten", "." ]
def getIncrementFromRank( rank ): 'Get the increment from the rank which is 0 at 1 and increases by three every power of ten.' rankZone = int( math.floor( rank / 3 ) ) rankModulo = rank % 3 powerOfTen = pow( 10, rankZone ) moduloMultipliers = ( 1, 2, 5 ) return float( powerOfTen * moduloMultipliers[ rankModulo ] )
[ "def", "getIncrementFromRank", "(", "rank", ")", ":", "rankZone", "=", "int", "(", "math", ".", "floor", "(", "rank", "/", "3", ")", ")", "rankModulo", "=", "rank", "%", "3", "powerOfTen", "=", "pow", "(", "10", ",", "rankZone", ")", "moduloMultipliers", "=", "(", "1", ",", "2", ",", "5", ")", "return", "float", "(", "powerOfTen", "*", "moduloMultipliers", "[", "rankModulo", "]", ")" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-35/fabmetheus_utilities/euclidean.py#L901-L907
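To illustrate the 1-2-5 progression the function above encodes, here is a standalone re-implementation with worked values (not part of the skeinforge source):

import math

def get_increment_from_rank(rank):
    # rank 0,1,2 -> 1,2,5; rank 3,4,5 -> 10,20,50; and so on each power of ten
    rank_zone = int(math.floor(rank / 3))
    return float(10 ** rank_zone * (1, 2, 5)[rank % 3])

assert [get_increment_from_rank(r) for r in range(6)] == [1.0, 2.0, 5.0, 10.0, 20.0, 50.0]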
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
ansible/roles/lib_git/library/git_commit.py
python
GitCommit.__init__
(self, msg, path, commit_files, author=None)
Constructor for GitCommit
Constructor for GitCommit
[ "Constructor", "for", "GitCommit" ]
def __init__(self, msg, path, commit_files, author=None): ''' Constructor for GitCommit ''' super(GitCommit, self).__init__(path, author=author) self.path = path self.msg = msg self.commit_files = commit_files self.author = author self.debug = [] os.chdir(path) self.status_results = self._status(porcelain=True) self.debug.append(self.status_results)
[ "def", "__init__", "(", "self", ",", "msg", ",", "path", ",", "commit_files", ",", "author", "=", "None", ")", ":", "super", "(", "GitCommit", ",", "self", ")", ".", "__init__", "(", "path", ",", "author", "=", "author", ")", "self", ".", "path", "=", "path", "self", ".", "msg", "=", "msg", "self", ".", "commit_files", "=", "commit_files", "self", ".", "author", "=", "author", "self", ".", "debug", "=", "[", "]", "os", ".", "chdir", "(", "path", ")", "self", ".", "status_results", "=", "self", ".", "_status", "(", "porcelain", "=", "True", ")", "self", ".", "debug", ".", "append", "(", "self", ".", "status_results", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_git/library/git_commit.py#L381-L397
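A hypothetical construction of the class above, assuming the module is importable; the repo path, file list, and author string are placeholders:

commit = GitCommit(msg='fix typo in README',
                   path='/path/to/repo',
                   commit_files=['README.md'],
                   author='Jane Doe <jane@example.com>')
# commit.debug now holds the porcelain `git status` output gathered in __init__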
fossasia/open-event-legacy
82b585d276efb894a48919bec4f3bff49077e2e8
app/helpers/deployment/kubernetes.py
python
KubernetesApi.get
(self, endpoint, headers=None, params=None, return_json=True)
Make a GET request :param return_json: :param params: :param headers: :param endpoint: :return:
Make a GET request :param return_json: :param params: :param headers: :param endpoint: :return:
[ "Make", "a", "GET", "request", ":", "param", "return_json", ":", ":", "param", "params", ":", ":", "param", "headers", ":", ":", "param", "endpoint", ":", ":", "return", ":" ]
def get(self, endpoint, headers=None, params=None, return_json=True): """ Make a GET request :param return_json: :param params: :param headers: :param endpoint: :return: """ if not headers: headers = self.headers response = requests.get(self.api_url + endpoint, headers=headers, params=params, verify=False) if return_json: return response.json() else: return response.text
[ "def", "get", "(", "self", ",", "endpoint", ",", "headers", "=", "None", ",", "params", "=", "None", ",", "return_json", "=", "True", ")", ":", "if", "not", "headers", ":", "headers", "=", "self", ".", "headers", "response", "=", "requests", ".", "get", "(", "self", ".", "api_url", "+", "endpoint", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "verify", "=", "False", ")", "if", "return_json", ":", "return", "response", ".", "json", "(", ")", "else", ":", "return", "response", ".", "text" ]
https://github.com/fossasia/open-event-legacy/blob/82b585d276efb894a48919bec4f3bff49077e2e8/app/helpers/deployment/kubernetes.py#L18-L33
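A hypothetical call against standard Kubernetes API paths; the `api` instance and its construction are assumed from the surrounding class:

pods = api.get('/api/v1/namespaces/default/pods', params={'limit': 10})
raw = api.get('/version', return_json=False)  # plain text instead of parsed JSON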
rootpy/rootpy
3926935e1f2100d8ba68070c2ab44055d4800f73
rootpy/extern/pyparsing.py
python
ParserElement.validate
( self, validateTrace=[] )
Check defined expressions for valid structure, check for infinite recursive definitions.
Check defined expressions for valid structure, check for infinite recursive definitions.
[ "Check", "defined", "expressions", "for", "valid", "structure", "check", "for", "infinite", "recursive", "definitions", "." ]
def validate( self, validateTrace=[] ): """Check defined expressions for valid structure, check for infinite recursive definitions.""" self.checkRecursion( [] )
[ "def", "validate", "(", "self", ",", "validateTrace", "=", "[", "]", ")", ":", "self", ".", "checkRecursion", "(", "[", "]", ")" ]
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/extern/pyparsing.py#L1489-L1491
django-extensions/django-extensions
f67ff680cd6d7264cdce05309b537ac2e1ee4a70
django_extensions/db/fields/__init__.py
python
AutoSlugField._slug_strip
(self, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
Clean up a slug by removing slug separator characters that occur at the beginning or end of a slug. If an alternate separator is used, it will also replace any instances of the default '-' separator with the new separator.
Clean up a slug by removing slug separator characters that occur at the beginning or end of a slug.
[ "Clean", "up", "a", "slug", "by", "removing", "slug", "separator", "characters", "that", "occur", "at", "the", "beginning", "or", "end", "of", "a", "slug", "." ]
def _slug_strip(self, value): """ Clean up a slug by removing slug separator characters that occur at the beginning or end of a slug. If an alternate separator is used, it will also replace any instances of the default '-' separator with the new separator. """ re_sep = '(?:-|%s)' % re.escape(self.separator) value = re.sub('%s+' % re_sep, self.separator, value) return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
[ "def", "_slug_strip", "(", "self", ",", "value", ")", ":", "re_sep", "=", "'(?:-|%s)'", "%", "re", ".", "escape", "(", "self", ".", "separator", ")", "value", "=", "re", ".", "sub", "(", "'%s+'", "%", "re_sep", ",", "self", ".", "separator", ",", "value", ")", "return", "re", ".", "sub", "(", "r'^%s+|%s+$'", "%", "(", "re_sep", ",", "re_sep", ")", ",", "''", ",", "value", ")" ]
https://github.com/django-extensions/django-extensions/blob/f67ff680cd6d7264cdce05309b537ac2e1ee4a70/django_extensions/db/fields/__init__.py#L177-L187
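A standalone sketch of the same logic with worked inputs, for illustration only (the original is a method on AutoSlugField):

import re

def slug_strip(value, separator='-'):
    re_sep = '(?:-|%s)' % re.escape(separator)
    value = re.sub('%s+' % re_sep, separator, value)           # collapse runs, swap separator
    return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)  # trim the ends

assert slug_strip('-hello--world-') == 'hello-world'
assert slug_strip('-hello--world-', separator='_') == 'hello_world'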
getsentry/sentry
83b1f25aac3e08075e0e2495bc29efaf35aca18a
src/sentry/integrations/slack/endpoints/base.py
python
SlackDMEndpoint.post_dispatcher
(self, request: SlackDMRequest)
return self.respond(SlackHelpMessageBuilder(unknown_command).build())
All Slack commands are handled by this endpoint. This block just validates the request and dispatches it to the right handler.
All Slack commands are handled by this endpoint. This block just validates the request and dispatches it to the right handler.
[ "All", "Slack", "commands", "are", "handled", "by", "this", "endpoint", ".", "This", "block", "just", "validates", "the", "request", "and", "dispatches", "it", "to", "the", "right", "handler", "." ]
def post_dispatcher(self, request: SlackDMRequest) -> Response: """ All Slack commands are handled by this endpoint. This block just validates the request and dispatches it to the right handler. """ command, args = request.get_command_and_args() if command in ["help", ""]: return self.respond(SlackHelpMessageBuilder().build()) if command == "link": if not args: return self.link_user(request) if args[0] == "team": return self.link_team(request) if command == "unlink": if not args: return self.unlink_user(request) if args[0] == "team": return self.unlink_team(request) # If we cannot interpret the command, print help text. request_data = request.data unknown_command = request_data.get("text", "").lower() return self.respond(SlackHelpMessageBuilder(unknown_command).build())
[ "def", "post_dispatcher", "(", "self", ",", "request", ":", "SlackDMRequest", ")", "->", "Response", ":", "command", ",", "args", "=", "request", ".", "get_command_and_args", "(", ")", "if", "command", "in", "[", "\"help\"", ",", "\"\"", "]", ":", "return", "self", ".", "respond", "(", "SlackHelpMessageBuilder", "(", ")", ".", "build", "(", ")", ")", "if", "command", "==", "\"link\"", ":", "if", "not", "args", ":", "return", "self", ".", "link_user", "(", "request", ")", "if", "args", "[", "0", "]", "==", "\"team\"", ":", "return", "self", ".", "link_team", "(", "request", ")", "if", "command", "==", "\"unlink\"", ":", "if", "not", "args", ":", "return", "self", ".", "unlink_user", "(", "request", ")", "if", "args", "[", "0", "]", "==", "\"team\"", ":", "return", "self", ".", "unlink_team", "(", "request", ")", "# If we cannot interpret the command, print help text.", "request_data", "=", "request", ".", "data", "unknown_command", "=", "request_data", ".", "get", "(", "\"text\"", ",", "\"\"", ")", ".", "lower", "(", ")", "return", "self", ".", "respond", "(", "SlackHelpMessageBuilder", "(", "unknown_command", ")", ".", "build", "(", ")", ")" ]
https://github.com/getsentry/sentry/blob/83b1f25aac3e08075e0e2495bc29efaf35aca18a/src/sentry/integrations/slack/endpoints/base.py#L24-L51
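The routing implied by the branches above, summarized with a hypothetical `/sentry` slash command (the actual command name is not fixed by this record):

# "/sentry help"        -> help message
# "/sentry link"        -> link_user
# "/sentry link team"   -> link_team
# "/sentry unlink"      -> unlink_user
# "/sentry unlink team" -> unlink_team
# anything else         -> help message built around the unknown text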
robotlearn/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
pyrobolearn/utils/transformation.py
python
get_symbolic_matrix_from_quaternion
(q, convention='xyzw')
return get_matrix_from_quaternion(q, convention=convention)
Get symbolic rotation matrix from the given quaternion. Args: q (np.array[sympy.Symbol[4]], np.array[float[4]]): (symbolic) quaternion. convention (str): convention to be adopted when representing the quaternion. You can choose between 'xyzw' or 'wxyz'. Returns: np.array[sympy.Symbol[3,3]]: symbolic rotation matrix.
Get symbolic rotation matrix from the given quaternion.
[ "Get", "symbolic", "rotation", "matrix", "from", "the", "given", "quaternion", "." ]
def get_symbolic_matrix_from_quaternion(q, convention='xyzw'): """ Get symbolic rotation matrix from the given quaternion. Args: q (np.array[sympy.Symbol[4]], np.array[float[4]]): (symbolic) quaternion. convention (str): convention to be adopted when representing the quaternion. You can choose between 'xyzw' or 'wxyz'. Returns: np.array[sympy.Symbol[3,3]]: symbolic rotation matrix. """ return get_matrix_from_quaternion(q, convention=convention)
[ "def", "get_symbolic_matrix_from_quaternion", "(", "q", ",", "convention", "=", "'xyzw'", ")", ":", "return", "get_matrix_from_quaternion", "(", "q", ",", "convention", "=", "convention", ")" ]
https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/utils/transformation.py#L520-L532
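A hypothetical usage, assuming pyrobolearn and sympy are importable; the symbol names are arbitrary:

import numpy as np
import sympy

from pyrobolearn.utils.transformation import get_symbolic_matrix_from_quaternion

x, y, z, w = sympy.symbols('x y z w')
R = get_symbolic_matrix_from_quaternion(np.array([x, y, z, w]))  # 3x3 symbolic rotation matrix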
googleapis/python-bigquery-sqlalchemy
a491136d94cf76128ad06635df2c0dcb2c558a40
samples/snippets/noxfile.py
python
_determine_local_import_names
(start_dir: str)
return [ basename for basename, extension in file_ext_pairs if extension == ".py" or os.path.isdir(os.path.join(start_dir, basename)) and basename not in ("__pycache__") ]
Determines all import names that should be considered "local". This is used when running the linter to ensure that import order is properly checked.
Determines all import names that should be considered "local".
[ "Determines", "all", "import", "names", "that", "should", "be", "considered", "local", "." ]
def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to ensure that import order is properly checked. """ file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] return [ basename for basename, extension in file_ext_pairs if extension == ".py" or os.path.isdir(os.path.join(start_dir, basename)) and basename not in ("__pycache__") ]
[ "def", "_determine_local_import_names", "(", "start_dir", ":", "str", ")", "->", "List", "[", "str", "]", ":", "file_ext_pairs", "=", "[", "os", ".", "path", ".", "splitext", "(", "path", ")", "for", "path", "in", "os", ".", "listdir", "(", "start_dir", ")", "]", "return", "[", "basename", "for", "basename", ",", "extension", "in", "file_ext_pairs", "if", "extension", "==", "\".py\"", "or", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "start_dir", ",", "basename", ")", ")", "and", "basename", "not", "in", "(", "\"__pycache__\"", ")", "]" ]
https://github.com/googleapis/python-bigquery-sqlalchemy/blob/a491136d94cf76128ad06635df2c0dcb2c558a40/samples/snippets/noxfile.py#L111-L124
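For example, in a samples directory containing app.py and a utils/ package (a made-up layout), the helper would report both as local:

local_names = _determine_local_import_names('.')
# -> ['app', 'utils'] for the layout above, in os.listdir() order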
akfamily/akshare
590e50eece9ec067da3538c7059fd660b71f1339
akshare/futures/futures_daily_bar.py
python
get_futures_index
(df: pd.DataFrame)
return pd.concat(index_dfs, axis=1).T
Daily index trading data, index synthesis :param df: the raw contract daily bars scraped from the source :type df: pandas.DataFrame :return: open-interest-weighted daily index bars :rtype: pandas.DataFrame
Daily index trading data, index synthesis :param df: the raw contract daily bars scraped from the source :type df: pandas.DataFrame :return: open-interest-weighted daily index bars :rtype: pandas.DataFrame
[ "指数日交易数据", "指数合成", ":", "param", "df", ":", "爬到的原始合约日线行情", ":", "type", "df", ":", "pandas", ".", "DataFrame", ":", "return", ":", "持仓量加权指数日线行情", ":", "rtype", ":", "pandas", ".", "DataFrame" ]
def get_futures_index(df: pd.DataFrame) -> pd.DataFrame: """ Daily index trading data, index synthesis :param df: the raw contract daily bars scraped from the source :type df: pandas.DataFrame :return: open-interest-weighted daily index bars :rtype: pandas.DataFrame """ index_dfs = [] for var in set(df["variety"]): df_cut = df[df["variety"] == var] df_cut = df_cut[df_cut["open_interest"] != 0] df_cut = df_cut[df_cut["close"] != np.nan] df_cut = df_cut[df_cut["volume"] != int(0)] if len(df_cut.index) > 0: index_df = pd.Series(index=df_cut.columns, dtype="object") index_df[["volume", "open_interest", "turnover"]] = df_cut[ ["volume", "open_interest", "turnover"] ].sum() if "efp" in df_cut.iloc[-1, 0]: df_cut = df_cut.iloc[:-1, :] df_cut.replace("", 0, inplace=True) # 20201026 some records are missing the opening price index_df[["open", "high", "low", "close", "settle", "pre_settle"]] = ( np.dot( np.array( df_cut[["open", "high", "low", "close", "settle", "pre_settle"]] ).T, np.array((df_cut["open_interest"].astype(float))), ) / np.sum(df_cut["open_interest"].astype(float)) ) index_df[["date", "variety"]] = df_cut[["date", "variety"]].iloc[0, :] index_df["symbol"] = index_df["variety"] + "99" index_dfs.append(index_df) return pd.concat(index_dfs, axis=1).T
[ "def", "get_futures_index", "(", "df", ":", "pd", ".", "DataFrame", ")", "->", "pd", ".", "DataFrame", ":", "index_dfs", "=", "[", "]", "for", "var", "in", "set", "(", "df", "[", "\"variety\"", "]", ")", ":", "df_cut", "=", "df", "[", "df", "[", "\"variety\"", "]", "==", "var", "]", "df_cut", "=", "df_cut", "[", "df_cut", "[", "\"open_interest\"", "]", "!=", "0", "]", "df_cut", "=", "df_cut", "[", "df_cut", "[", "\"close\"", "]", "!=", "np", ".", "nan", "]", "df_cut", "=", "df_cut", "[", "df_cut", "[", "\"volume\"", "]", "!=", "int", "(", "0", ")", "]", "if", "len", "(", "df_cut", ".", "index", ")", ">", "0", ":", "index_df", "=", "pd", ".", "Series", "(", "index", "=", "df_cut", ".", "columns", ",", "dtype", "=", "\"object\"", ")", "index_df", "[", "[", "\"volume\"", ",", "\"open_interest\"", ",", "\"turnover\"", "]", "]", "=", "df_cut", "[", "[", "\"volume\"", ",", "\"open_interest\"", ",", "\"turnover\"", "]", "]", ".", "sum", "(", ")", "if", "\"efp\"", "in", "df_cut", ".", "iloc", "[", "-", "1", ",", "0", "]", ":", "df_cut", "=", "df_cut", ".", "iloc", "[", ":", "-", "1", ",", ":", "]", "df_cut", ".", "replace", "(", "\"\"", ",", "0", ",", "inplace", "=", "True", ")", "# 20201026 部分数据开盘价空缺", "index_df", "[", "[", "\"open\"", ",", "\"high\"", ",", "\"low\"", ",", "\"close\"", ",", "\"settle\"", ",", "\"pre_settle\"", "]", "]", "=", "(", "np", ".", "dot", "(", "np", ".", "array", "(", "df_cut", "[", "[", "\"open\"", ",", "\"high\"", ",", "\"low\"", ",", "\"close\"", ",", "\"settle\"", ",", "\"pre_settle\"", "]", "]", ")", ".", "T", ",", "np", ".", "array", "(", "(", "df_cut", "[", "\"open_interest\"", "]", ".", "astype", "(", "float", ")", ")", ")", ",", ")", "/", "np", ".", "sum", "(", "df_cut", "[", "\"open_interest\"", "]", ".", "astype", "(", "float", ")", ")", ")", "index_df", "[", "[", "\"date\"", ",", "\"variety\"", "]", "]", "=", "df_cut", "[", "[", "\"date\"", ",", "\"variety\"", "]", "]", ".", "iloc", "[", "0", ",", ":", "]", "index_df", "[", "\"symbol\"", "]", "=", "index_df", "[", "\"variety\"", "]", "+", "\"99\"", "index_dfs", ".", "append", "(", "index_df", ")", "return", "pd", ".", "concat", "(", "index_dfs", ",", "axis", "=", "1", ")", ".", "T" ]
https://github.com/akfamily/akshare/blob/590e50eece9ec067da3538c7059fd660b71f1339/akshare/futures/futures_daily_bar.py#L566-L600
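The core of the synthesis is an open-interest-weighted average over the contracts of one variety; a small numeric sketch with made-up prices:

import numpy as np

closes = np.array([4000.0, 4100.0])       # two contracts of the same variety
open_interest = np.array([3.0, 1.0])
index_close = np.dot(closes, open_interest) / open_interest.sum()
assert index_close == 4025.0              # (4000*3 + 4100*1) / 4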
dsmrreader/dsmr-reader
c037848e0f96028fb500415b9289df40f81bc14f
dsmr_backend/management/commands/dsmr_superuser.py
python
Command.handle
(self, **options)
WARNING: Only safe for command line execution. Do NOT use for web requests!
WARNING: Only safe for command line execution. Do NOT use for web requests!
[ "WARNING", ":", "Only", "safe", "for", "command", "line", "execution", ".", "Do", "NOT", "use", "for", "web", "requests!" ]
def handle(self, **options): """ WARNING: Only safe for command line execution. Do NOT use for web requests! """ username = config( 'DSMR_USER', # @deprecated v4.5, removed v5.0 default=config('DSMRREADER_ADMIN_USER', default=None) ) password = config( 'DSMR_PASSWORD', # @deprecated v4.5, removed v5.0 default=config('DSMRREADER_ADMIN_PASSWORD') ) if not username or not password: raise CommandError( 'Either DSMRREADER_ADMIN_USER or DSMRREADER_ADMIN_PASSWORD (or both) are missing/empty in (.)env' ) try: user = User.objects.get( username=username, is_superuser=True ) except User.DoesNotExist: print('Creating new superuser "{}"'.format(username)) user = User.objects.create_superuser(username, '{}@localhost'.format(username), password) else: print('Updating password of superuser "{}"'.format(username)) user.set_password(password) user.is_active = True user.save() # Do not allow any other users to be active at the same time. print('Deactivating any other existing superusers') User.objects.filter( is_superuser=True ).exclude( pk=user.pk ).update( is_active=False )
[ "def", "handle", "(", "self", ",", "*", "*", "options", ")", ":", "username", "=", "config", "(", "'DSMR_USER'", ",", "# @deprecated v4.5, removed v5.0", "default", "=", "config", "(", "'DSMRREADER_ADMIN_USER'", ",", "default", "=", "None", ")", ")", "password", "=", "config", "(", "'DSMR_PASSWORD'", ",", "# @deprecated v4.5, removed v5.0", "default", "=", "config", "(", "'DSMRREADER_ADMIN_PASSWORD'", ")", ")", "if", "not", "username", "or", "not", "password", ":", "raise", "CommandError", "(", "'Either DSMRREADER_ADMIN_USER or DSMRREADER_ADMIN_PASSWORD (or both) are missing/empty in (.)env'", ")", "try", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "username", ",", "is_superuser", "=", "True", ")", "except", "User", ".", "DoesNotExist", ":", "print", "(", "'Creating new superuser \"{}\"'", ".", "format", "(", "username", ")", ")", "user", "=", "User", ".", "objects", ".", "create_superuser", "(", "username", ",", "'{}@localhost'", ".", "format", "(", "username", ")", ",", "password", ")", "else", ":", "print", "(", "'Updating password of superuser \"{}\"'", ".", "format", "(", "username", ")", ")", "user", ".", "set_password", "(", "password", ")", "user", ".", "is_active", "=", "True", "user", ".", "save", "(", ")", "# Do not allow any other users to be active at the same time.", "print", "(", "'Deactivating any other existing superusers'", ")", "User", ".", "objects", ".", "filter", "(", "is_superuser", "=", "True", ")", ".", "exclude", "(", "pk", "=", "user", ".", "pk", ")", ".", "update", "(", "is_active", "=", "False", ")" ]
https://github.com/dsmrreader/dsmr-reader/blob/c037848e0f96028fb500415b9289df40f81bc14f/dsmr_backend/management/commands/dsmr_superuser.py#L9-L47
etetoolkit/ete
2b207357dc2a40ccad7bfd8f54964472c72e4726
ete3/phyloxml/_phyloxml.py
python
Clade.set_name
(self, name)
[]
def set_name(self, name): self.name = name
[ "def", "set_name", "(", "self", ",", "name", ")", ":", "self", ".", "name", "=", "name" ]
https://github.com/etetoolkit/ete/blob/2b207357dc2a40ccad7bfd8f54964472c72e4726/ete3/phyloxml/_phyloxml.py#L847-L847
chapmanb/bcbb
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
nextgen/scripts/bcbio_nextgen_install.py
python
install_bcbio_nextgen
(requirements, datadir, tooldir, use_sudo)
Install a virtualenv containing bcbio_nextgen dependencies.
Install a virtualenv containing bcbio_nextgen dependencies.
[ "Install", "a", "virtualenv", "containing", "bcbio_nextgen", "depdencies", "." ]
def install_bcbio_nextgen(requirements, datadir, tooldir, use_sudo): """Install a virtualenv containing bcbio_nextgen dependencies. """ virtualenv_dir = os.path.join(datadir, "bcbio-nextgen-virtualenv") if not os.path.exists(virtualenv_dir): subprocess.check_call(["virtualenv", "--no-site-packages", "--distribute", virtualenv_dir]) sudo_cmd = ["sudo"] if use_sudo else [] subprocess.check_call(sudo_cmd + ["pip", "install", "--upgrade", "distribute"]) subprocess.check_call([os.path.join(virtualenv_dir, "bin", "pip"), "install", "-r", requirements]) for script in ["bcbio_nextgen.py", "bam_to_wiggle.py"]: final_script = os.path.join(tooldir, "bin", script) ve_script = os.path.join(virtualenv_dir, "bin", script) if not os.path.exists(final_script): cmd = ["ln", "-s", ve_script, final_script] subprocess.check_call(sudo_cmd + cmd)
[ "def", "install_bcbio_nextgen", "(", "requirements", ",", "datadir", ",", "tooldir", ",", "use_sudo", ")", ":", "virtualenv_dir", "=", "os", ".", "path", ".", "join", "(", "datadir", ",", "\"bcbio-nextgen-virtualenv\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "virtualenv_dir", ")", ":", "subprocess", ".", "check_call", "(", "[", "\"virtualenv\"", ",", "\"--no-site-packages\"", ",", "\"--distribute\"", ",", "virtualenv_dir", "]", ")", "sudo_cmd", "=", "[", "\"sudo\"", "]", "if", "use_sudo", "else", "[", "]", "subprocess", ".", "check_call", "(", "sudo_cmd", "+", "[", "\"pip\"", ",", "\"install\"", ",", "\"--upgrade\"", ",", "\"distribute\"", "]", ")", "subprocess", ".", "check_call", "(", "[", "os", ".", "path", ".", "join", "(", "virtualenv_dir", ",", "\"bin\"", ",", "\"pip\"", ")", ",", "\"install\"", ",", "\"-r\"", ",", "requirements", "]", ")", "for", "script", "in", "[", "\"bcbio_nextgen.py\"", ",", "\"bam_to_wiggle.py\"", "]", ":", "final_script", "=", "os", ".", "path", ".", "join", "(", "tooldir", ",", "\"bin\"", ",", "script", ")", "ve_script", "=", "os", ".", "path", ".", "join", "(", "virtualenv_dir", ",", "\"bin\"", ",", "script", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "final_script", ")", ":", "cmd", "=", "[", "\"ln\"", ",", "\"-s\"", ",", "ve_script", ",", "final_script", "]", "subprocess", ".", "check_call", "(", "sudo_cmd", "+", "cmd", ")" ]
https://github.com/chapmanb/bcbb/blob/dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027/nextgen/scripts/bcbio_nextgen_install.py#L51-L66
Source-Python-Dev-Team/Source.Python
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
addons/source-python/packages/site-packages/sqlalchemy/sql/visitors.py
python
ReplacingCloningVisitor.traverse
(self, obj)
return replacement_traverse(obj, self.__traverse_options__, replace)
traverse and visit the given expression structure.
traverse and visit the given expression structure.
[ "traverse", "and", "visit", "the", "given", "expression", "structure", "." ]
def traverse(self, obj): """traverse and visit the given expression structure.""" def replace(elem): for v in self._visitor_iterator: e = v.replace(elem) if e is not None: return e return replacement_traverse(obj, self.__traverse_options__, replace)
[ "def", "traverse", "(", "self", ",", "obj", ")", ":", "def", "replace", "(", "elem", ")", ":", "for", "v", "in", "self", ".", "_visitor_iterator", ":", "e", "=", "v", ".", "replace", "(", "elem", ")", "if", "e", "is", "not", "None", ":", "return", "e", "return", "replacement_traverse", "(", "obj", ",", "self", ".", "__traverse_options__", ",", "replace", ")" ]
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/packages/site-packages/sqlalchemy/sql/visitors.py#L199-L207
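A minimal subclass sketch: `replace` is the hook the visitor iterator above calls, returning a replacement element or None to leave it alone. `old_col`, `new_col`, and `expr` are assumed SQLAlchemy constructs, not part of this record:

from sqlalchemy.sql.visitors import ReplacingCloningVisitor

class SwapColumn(ReplacingCloningVisitor):
    def __init__(self, old, new):
        self.old, self.new = old, new

    def replace(self, elem):
        return self.new if elem is self.old else None  # None means "keep as is"

new_expr = SwapColumn(old_col, new_col).traverse(expr)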
brosner/everyblock_code
25397148223dad81e7fbb9c7cf2f169162df4681
ebgeo/ebgeo/utils/feature_reducer.py
python
Reducer.__init__
(self, key_fields)
``key_fields`` is a sequence whose items are either strings, which are names of the source datasource's fields, or 2-tuples, which consist of a field name for the destination datasource and a callable which takes a feature and a layer and returns the value of the field.
``key_fields`` is a sequence whose items are either strings, which are names of the source datasource's fields, or 2-tuples, which consist of a field name for the destination datasource and a callable which takes a feature and a layer and returns the value of the field.
[ "key_fields", "is", "a", "sequence", "consisting", "either", "or", "both", "of", "strings", "which", "are", "names", "of", "the", "source", "datasource", "s", "fields", "or", "2", "-", "tuples", "which", "consist", "of", "a", "field", "name", "for", "the", "destination", "datasource", "and", "a", "callable", "which", "takes", "a", "feature", "and", "a", "layer", "and", "returns", "the", "value", "of", "the", "field", "." ]
def __init__(self, key_fields): """ ``key_fields`` is a sequence whose items are either strings, which are names of the source datasource's fields, or 2-tuples, which consist of a field name for the destination datasource and a callable which takes a feature and a layer and returns the value of the field. """ self.key_fields = key_fields # Create a list of destination field names; order is important # and the list comes in handy for creating the destinations' # fields. self.key_fieldnames = [] for fieldname in self.key_fields: if isinstance(fieldname, tuple): fieldname = fieldname[0] self.key_fieldnames.append(fieldname)
[ "def", "__init__", "(", "self", ",", "key_fields", ")", ":", "self", ".", "key_fields", "=", "key_fields", "# Create a list of destination field names; order is imporant", "# and the list comes in handy for creating the destinations'", "# fields.", "self", ".", "key_fieldnames", "=", "[", "]", "for", "fieldname", "in", "self", ".", "key_fields", ":", "if", "isinstance", "(", "fieldname", ",", "tuple", ")", ":", "fieldname", "=", "fieldname", "[", "0", "]", "self", ".", "key_fieldnames", ".", "append", "(", "fieldname", ")" ]
https://github.com/brosner/everyblock_code/blob/25397148223dad81e7fbb9c7cf2f169162df4681/ebgeo/ebgeo/utils/feature_reducer.py#L12-L28
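A hypothetical `key_fields` value mixing both accepted item kinds; the field names and the feature-access expression inside the callable are placeholders:

reducer = Reducer(key_fields=[
    'city',  # plain string: copied from the source field of the same name
    ('name_upper', lambda feature, layer: feature['name'].upper()),  # computed field
])
# reducer.key_fieldnames == ['city', 'name_upper']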
inkandswitch/livebook
93c8d467734787366ad084fc3566bf5cbe249c51
public/pypyjs/modules/numpy/polynomial/legendre.py
python
legpow
(c, pow, maxpower=16)
Raise a Legendre series to a power. Returns the Legendre series `c` raised to the power `pow`. The argument `c` is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` Parameters ---------- c : array_like 1-D array of Legendre series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Legendre series of power. See Also -------- legadd, legsub, legmul, legdiv Examples --------
Raise a Legendre series to a power.
[ "Raise", "a", "Legendre", "series", "to", "a", "power", "." ]
def legpow(c, pow, maxpower=16): """Raise a Legendre series to a power. Returns the Legendre series `c` raised to the power `pow`. The argument `c` is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` Parameters ---------- c : array_like 1-D array of Legendre series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Legendre series of power. See Also -------- legadd, legsub, legmul, legdiv Examples -------- """ # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") elif power == 0: return np.array([1], dtype=c.dtype) elif power == 1: return c else: # This can be made more efficient by using powers of two # in the usual way. prd = c for i in range(2, power + 1): prd = legmul(prd, c) return prd
[ "def", "legpow", "(", "c", ",", "pow", ",", "maxpower", "=", "16", ")", ":", "# c is a trimmed copy", "[", "c", "]", "=", "pu", ".", "as_series", "(", "[", "c", "]", ")", "power", "=", "int", "(", "pow", ")", "if", "power", "!=", "pow", "or", "power", "<", "0", ":", "raise", "ValueError", "(", "\"Power must be a non-negative integer.\"", ")", "elif", "maxpower", "is", "not", "None", "and", "power", ">", "maxpower", ":", "raise", "ValueError", "(", "\"Power is too large\"", ")", "elif", "power", "==", "0", ":", "return", "np", ".", "array", "(", "[", "1", "]", ",", "dtype", "=", "c", ".", "dtype", ")", "elif", "power", "==", "1", ":", "return", "c", "else", ":", "# This can be made more efficient by using powers of two", "# in the usual way.", "prd", "=", "c", "for", "i", "in", "range", "(", "2", ",", "power", "+", "1", ")", ":", "prd", "=", "legmul", "(", "prd", ",", "c", ")", "return", "prd" ]
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/numpy/polynomial/legendre.py#L617-L665
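The edge cases are visible directly in the code; a short check using the same API as shipped in numpy.polynomial.legendre:

import numpy as np
import numpy.polynomial.legendre as L

c = [1, 2, 3]                                       # P_0 + 2*P_1 + 3*P_2
assert list(L.legpow(c, 0)) == [1.0]                # power 0 -> constant series [1]
assert np.allclose(L.legpow(c, 1), c)               # power 1 -> the series itself
assert np.allclose(L.legpow(c, 2), L.legmul(c, c))  # higher powers via repeated legmul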
meraki/dashboard-api-python
aef5e6fe5d23a40d435d5c64ff30580a28af07f1
meraki/aio/api/sm.py
python
AsyncSm.createNetworkSmTargetGroup
(self, networkId: str, **kwargs)
return self._session.post(metadata, resource, payload)
**Add a target group** https://developer.cisco.com/meraki/api-v1/#!create-network-sm-target-group - networkId (string): (required) - name (string): The name of this target group - scope (string): The scope and tag options of the target group. Comma separated values beginning with one of withAny, withAll, withoutAny, withoutAll, all, none, followed by tags. Default to none if empty.
**Add a target group** https://developer.cisco.com/meraki/api-v1/#!create-network-sm-target-group
[ "**", "Add", "a", "target", "group", "**", "https", ":", "//", "developer", ".", "cisco", ".", "com", "/", "meraki", "/", "api", "-", "v1", "/", "#!create", "-", "network", "-", "sm", "-", "target", "-", "group" ]
def createNetworkSmTargetGroup(self, networkId: str, **kwargs): """ **Add a target group** https://developer.cisco.com/meraki/api-v1/#!create-network-sm-target-group - networkId (string): (required) - name (string): The name of this target group - scope (string): The scope and tag options of the target group. Comma separated values beginning with one of withAny, withAll, withoutAny, withoutAll, all, none, followed by tags. Default to none if empty. """ kwargs.update(locals()) metadata = { 'tags': ['sm', 'configure', 'targetGroups'], 'operation': 'createNetworkSmTargetGroup' } resource = f'/networks/{networkId}/sm/targetGroups' body_params = ['name', 'scope', ] payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params} return self._session.post(metadata, resource, payload)
[ "def", "createNetworkSmTargetGroup", "(", "self", ",", "networkId", ":", "str", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "locals", "(", ")", ")", "metadata", "=", "{", "'tags'", ":", "[", "'sm'", ",", "'configure'", ",", "'targetGroups'", "]", ",", "'operation'", ":", "'createNetworkSmTargetGroup'", "}", "resource", "=", "f'/networks/{networkId}/sm/targetGroups'", "body_params", "=", "[", "'name'", ",", "'scope'", ",", "]", "payload", "=", "{", "k", ".", "strip", "(", ")", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "k", ".", "strip", "(", ")", "in", "body_params", "}", "return", "self", ".", "_session", ".", "post", "(", "metadata", ",", "resource", ",", "payload", ")" ]
https://github.com/meraki/dashboard-api-python/blob/aef5e6fe5d23a40d435d5c64ff30580a28af07f1/meraki/aio/api/sm.py#L610-L631
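A hypothetical async call; the API key, network id, group name, and tag are placeholders, and AsyncDashboardAPI is the client this aio module is normally used through:

import asyncio
import meraki.aio

async def main():
    async with meraki.aio.AsyncDashboardAPI(api_key='...') as dashboard:
        group = await dashboard.sm.createNetworkSmTargetGroup(
            'N_123456', name='pilot-devices', scope='withAny, pilot')
        print(group)

asyncio.run(main())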
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/pipes.py
python
Template.prepend
(self, cmd, kind)
t.prepend(cmd, kind) adds a new step at the front.
t.prepend(cmd, kind) adds a new step at the front.
[ "t", ".", "prepend", "(", "cmd", "kind", ")", "adds", "a", "new", "step", "at", "the", "front", "." ]
def prepend(self, cmd, kind): """t.prepend(cmd, kind) adds a new step at the front.""" if type(cmd) is not type(''): raise TypeError, \ 'Template.prepend: cmd must be a string' if kind not in stepkinds: raise ValueError, \ 'Template.prepend: bad kind %r' % (kind,) if kind == SINK: raise ValueError, \ 'Template.prepend: SINK can only be appended' if self.steps and self.steps[0][1] == SOURCE: raise ValueError, \ 'Template.prepend: already begins with SOURCE' if kind[0] == 'f' and not re.search(r'\$IN\b', cmd): raise ValueError, \ 'Template.prepend: missing $IN in cmd' if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd): raise ValueError, \ 'Template.prepend: missing $OUT in cmd' self.steps.insert(0, (cmd, kind))
[ "def", "prepend", "(", "self", ",", "cmd", ",", "kind", ")", ":", "if", "type", "(", "cmd", ")", "is", "not", "type", "(", "''", ")", ":", "raise", "TypeError", ",", "'Template.prepend: cmd must be a string'", "if", "kind", "not", "in", "stepkinds", ":", "raise", "ValueError", ",", "'Template.prepend: bad kind %r'", "%", "(", "kind", ",", ")", "if", "kind", "==", "SINK", ":", "raise", "ValueError", ",", "'Template.prepend: SINK can only be appended'", "if", "self", ".", "steps", "and", "self", ".", "steps", "[", "0", "]", "[", "1", "]", "==", "SOURCE", ":", "raise", "ValueError", ",", "'Template.prepend: already begins with SOURCE'", "if", "kind", "[", "0", "]", "==", "'f'", "and", "not", "re", ".", "search", "(", "r'\\$IN\\b'", ",", "cmd", ")", ":", "raise", "ValueError", ",", "'Template.prepend: missing $IN in cmd'", "if", "kind", "[", "1", "]", "==", "'f'", "and", "not", "re", ".", "search", "(", "r'\\$OUT\\b'", ",", "cmd", ")", ":", "raise", "ValueError", ",", "'Template.prepend: missing $OUT in cmd'", "self", ".", "steps", ".", "insert", "(", "0", ",", "(", "cmd", ",", "kind", ")", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/pipes.py#L132-L152
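A short sketch with the stdlib pipes module the record quotes (Python 2 here); the commands are examples only. The 'f-' kind passes prepend's checks: it requires $IN in the command and forbids $OUT:

import pipes

t = pipes.Template()
t.append('tr a-z A-Z', '--')      # filter: reads stdin, writes stdout
t.prepend('gzip -d < $IN', 'f-')  # new first step: reads the file named $IN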
rytilahti/python-miio
b6e53dd16fac77915426e7592e2528b78ef65190
miio/integrations/vacuum/roborock/vacuum.py
python
RoborockVacuum.stop
(self)
return self.send("app_stop")
Stop cleaning. Note, prefer 'pause' instead of this for wider support. Some newer vacuum models do not support this command.
Stop cleaning.
[ "Stop", "cleaning", "." ]
def stop(self): """Stop cleaning. Note, prefer 'pause' instead of this for wider support. Some newer vacuum models do not support this command. """ return self.send("app_stop")
[ "def", "stop", "(", "self", ")", ":", "return", "self", ".", "send", "(", "\"app_stop\"", ")" ]
https://github.com/rytilahti/python-miio/blob/b6e53dd16fac77915426e7592e2528b78ef65190/miio/integrations/vacuum/roborock/vacuum.py#L190-L196
qmlcode/qml
8bb833cdbbe69405384d6796920c5418dc53b6ba
qml/arad.py
python
get_local_symmetric_kernels_arad
(X1, sigmas, width=0.2, cut_distance=5.0, r_width=1.0, c_width=0.5)
return fget_local_symmetric_kernels_arad(X1, Z1_arad, N1, sigmas, nm1, nsigmas, width, cut_distance, r_width, c_width)
Calculates the Gaussian kernel matrix K for atomic ARAD descriptors for a list of different sigmas. Each kernel element is the sum of all kernel elements between pairs of atoms in two molecules. K is calculated using an OpenMP parallel Fortran routine. :param X1: ARAD descriptors for molecules in set 1. :type X1: numpy array :param sigmas: List of sigmas for which to calculate the Kernel matrices. :type sigmas: list :return: The kernel matrices for each sigma - shape (number_sigmas, number_molecules1, number_molecules1) :rtype: numpy array
Calculates the Gaussian kernel matrix K for atomic ARAD descriptors for a list of different sigmas. Each kernel element is the sum of all kernel elements between pairs of atoms in two molecules.
[ "Calculates", "the", "Gaussian", "kernel", "matrix", "K", "for", "atomic", "ARAD", "descriptors", "for", "a", "list", "of", "different", "sigmas", ".", "Each", "kernel", "element", "is", "the", "sum", "of", "all", "kernel", "elements", "between", "pairs", "of", "atoms", "in", "two", "molecules", "." ]
def get_local_symmetric_kernels_arad(X1, sigmas, width=0.2, cut_distance=5.0, r_width=1.0, c_width=0.5): """ Calculates the Gaussian kernel matrix K for atomic ARAD descriptors for a list of different sigmas. Each kernel element is the sum of all kernel elements between pairs of atoms in two molecules. K is calculated using an OpenMP parallel Fortran routine. :param X1: ARAD descriptors for molecules in set 1. :type X1: numpy array :param sigmas: List of sigmas for which to calculate the Kernel matrices. :type sigmas: list :return: The kernel matrices for each sigma - shape (number_sigmas, number_molecules1, number_molecules1) :rtype: numpy array """ nm1 = X1.shape[0] amax = X1.shape[1] N1 = np.empty(nm1, dtype = np.int32) Z1_arad = np.zeros((nm1, amax, 2)) for i in range(nm1): N1[i] = len(np.where(X1[i,:,2,0] > 0)[0]) Z1_arad[i] = X1[i,:,1:3,0] sigmas = np.array(sigmas) nsigmas = sigmas.size return fget_local_symmetric_kernels_arad(X1, Z1_arad, N1, sigmas, nm1, nsigmas, width, cut_distance, r_width, c_width)
[ "def", "get_local_symmetric_kernels_arad", "(", "X1", ",", "sigmas", ",", "width", "=", "0.2", ",", "cut_distance", "=", "5.0", ",", "r_width", "=", "1.0", ",", "c_width", "=", "0.5", ")", ":", "nm1", "=", "X1", ".", "shape", "[", "0", "]", "amax", "=", "X1", ".", "shape", "[", "1", "]", "N1", "=", "np", ".", "empty", "(", "nm1", ",", "dtype", "=", "np", ".", "int32", ")", "Z1_arad", "=", "np", ".", "zeros", "(", "(", "nm1", ",", "amax", ",", "2", ")", ")", "for", "i", "in", "range", "(", "nm1", ")", ":", "N1", "[", "i", "]", "=", "len", "(", "np", ".", "where", "(", "X1", "[", "i", ",", ":", ",", "2", ",", "0", "]", ">", "0", ")", "[", "0", "]", ")", "Z1_arad", "[", "i", "]", "=", "X1", "[", "i", ",", ":", ",", "1", ":", "3", ",", "0", "]", "sigmas", "=", "np", ".", "array", "(", "sigmas", ")", "nsigmas", "=", "sigmas", ".", "size", "return", "fget_local_symmetric_kernels_arad", "(", "X1", ",", "Z1_arad", ",", "N1", ",", "sigmas", ",", "nm1", ",", "nsigmas", ",", "width", ",", "cut_distance", ",", "r_width", ",", "c_width", ")" ]
https://github.com/qmlcode/qml/blob/8bb833cdbbe69405384d6796920c5418dc53b6ba/qml/arad.py#L264-L294
auDeep/auDeep
07df37b4fde5b10cd96a0c94d8804a1612c10d6f
audeep/backend/data/data_set.py
python
_Instance.__str__
(self)
return self._data[dict(instance=self._instance)].__str__()
Returns a string representation of this instance. Returns ------- str A string representation of this instance
Returns a string representation of this instance. Returns ------- str A string representation of this instance
[ "Returns", "a", "string", "representation", "of", "this", "instance", ".", "Returns", "-------", "str", "A", "string", "representation", "of", "this", "instance" ]
def __str__(self) -> str: """ Returns a string representation of this instance. Returns ------- str A string representation of this instance """ return self._data[dict(instance=self._instance)].__str__()
[ "def", "__str__", "(", "self", ")", "->", "str", ":", "return", "self", ".", "_data", "[", "dict", "(", "instance", "=", "self", ".", "_instance", ")", "]", ".", "__str__", "(", ")" ]
https://github.com/auDeep/auDeep/blob/07df37b4fde5b10cd96a0c94d8804a1612c10d6f/audeep/backend/data/data_set.py#L483-L492
marshmallow-code/django-rest-marshmallow
117f89bd5f06de6049dd51d4705d0ccafcc351d7
rest_marshmallow/__init__.py
python
Schema.data
(self)
return self._serializer_data
[]
def data(self): # We're overriding the default implementation here, because the # '_data' property clashes with marshmallow's implementation. if hasattr(self, 'initial_data') and not hasattr(self, '_validated_data'): msg = ( 'When a serializer is passed a `data` keyword argument you ' 'must call `.is_valid()` before attempting to access the ' 'serialized `.data` representation.\n' 'You should either call `.is_valid()` first, ' 'or access `.initial_data` instead.' ) raise AssertionError(msg) if not hasattr(self, '_serializer_data'): if self.instance is not None and not getattr(self, '_errors', None): self._serializer_data = self.to_representation(self.instance) elif hasattr(self, '_validated_data') and not getattr(self, '_errors', None): self._serializer_data = self.to_representation(self.validated_data) else: self._serializer_data = self.get_initial() return self._serializer_data
[ "def", "data", "(", "self", ")", ":", "# We're overriding the default implementation here, because the", "# '_data' property clashes with marshmallow's implementation.", "if", "hasattr", "(", "self", ",", "'initial_data'", ")", "and", "not", "hasattr", "(", "self", ",", "'_validated_data'", ")", ":", "msg", "=", "(", "'When a serializer is passed a `data` keyword argument you '", "'must call `.is_valid()` before attempting to access the '", "'serialized `.data` representation.\\n'", "'You should either call `.is_valid()` first, '", "'or access `.initial_data` instead.'", ")", "raise", "AssertionError", "(", "msg", ")", "if", "not", "hasattr", "(", "self", ",", "'_serializer_data'", ")", ":", "if", "self", ".", "instance", "is", "not", "None", "and", "not", "getattr", "(", "self", ",", "'_errors'", ",", "None", ")", ":", "self", ".", "_serializer_data", "=", "self", ".", "to_representation", "(", "self", ".", "instance", ")", "elif", "hasattr", "(", "self", ",", "'_validated_data'", ")", "and", "not", "getattr", "(", "self", ",", "'_errors'", ",", "None", ")", ":", "self", ".", "_serializer_data", "=", "self", ".", "to_representation", "(", "self", ".", "validated_data", ")", "else", ":", "self", ".", "_serializer_data", "=", "self", ".", "get_initial", "(", ")", "return", "self", ".", "_serializer_data" ]
https://github.com/marshmallow-code/django-rest-marshmallow/blob/117f89bd5f06de6049dd51d4705d0ccafcc351d7/rest_marshmallow/__init__.py#L58-L78
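A hypothetical view-side usage; `MySchema` is assumed to be a rest_marshmallow.Schema subclass and `request` a DRF request, following the DRF serializer protocol this property mimics:

schema = MySchema(data=request.data)
if schema.is_valid():
    print(schema.data)    # representation of the validated data
else:
    print(schema.errors)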
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/api/prospective_search/prospective_search_stub.py
python
ProspectiveSearchStub._WalkQueryTree
(self, query_node, doc, query_field=None, level=0)
return False
Recursive match of doc from query tree at the given node.
Recursive match of doc from query tree at the given node.
[ "Recursive", "match", "of", "doc", "from", "query", "tree", "at", "the", "given", "node", "." ]
def _WalkQueryTree(self, query_node, doc, query_field=None, level=0): """Recursive match of doc from query tree at the given node.""" query_type = query_node.getType() query_text = query_node.getText() self._Debug('_WalkQueryTree: query type: %r, field: %r, text: %r' % (query_type, query_field, query_text), level=level) if query_type is QueryParser.CONJUNCTION: for child in query_node.children: if not self._WalkQueryTree( self.ExtractGlobalEq(child), doc, query_field, level=level + 1): return False return True elif query_type is QueryParser.DISJUNCTION: for child in query_node.children: if self._WalkQueryTree( self.ExtractGlobalEq(child), doc, query_field, level=level + 1): return True if query_type is QueryParser.NEGATION: self._Debug(('No such field so no match: field: %r, children: %r' % (query_type, query_node.children[0])), level) child = query_node.children[0] return not self._WalkQueryTree( self.ExtractGlobalEq(child), doc, query_field, level=level + 1) elif query_type is QueryParser.HAS: if query_node.children[0].getType() is not QueryParser.GLOBAL: query_field = query_node.children[0].getText() if query_field not in doc: self._Debug(('No such field so no match: field: %r' % query_field), level) return False return self._WalkQueryTree(query_node.children[1], doc, query_field, level=level + 1) elif query_type is QueryParser.VALUE or query_type is QueryParser.TEXT: if query_parser.IsPhrase(query_node): query_text = query_parser.GetQueryNodeTextUnicode(query_node) if query_field is not None: return self._MatchField(doc, query_field, query_text, level=level) for field_name in doc: if self._MatchField(doc, field_name, query_text, level=level): return True elif query_type in query_parser.COMPARISON_TYPES: query_field = query_node.children[0].getText() query_text = query_node.children[1].getText() if query_field is not None: if query_field not in doc: self._Debug(('No such field so no match: field: %r' % query_field), level) return False return self._MatchField(doc, query_field, query_text, query_type, level=level) for field_name in doc: if self._MatchField(doc, field_name, query_text, query_type, level=level): return True self._Debug('Fallthrough at %s returning false, query_node.children: %s' % (query_text, [n.getText() for n in query_node.children]), level) return False
[ "def", "_WalkQueryTree", "(", "self", ",", "query_node", ",", "doc", ",", "query_field", "=", "None", ",", "level", "=", "0", ")", ":", "query_type", "=", "query_node", ".", "getType", "(", ")", "query_text", "=", "query_node", ".", "getText", "(", ")", "self", ".", "_Debug", "(", "'_WalkQueryTree: query type: %r, field: %r, text: %r'", "%", "(", "query_type", ",", "query_field", ",", "query_text", ")", ",", "level", "=", "level", ")", "if", "query_type", "is", "QueryParser", ".", "CONJUNCTION", ":", "for", "child", "in", "query_node", ".", "children", ":", "if", "not", "self", ".", "_WalkQueryTree", "(", "self", ".", "ExtractGlobalEq", "(", "child", ")", ",", "doc", ",", "query_field", ",", "level", "=", "level", "+", "1", ")", ":", "return", "False", "return", "True", "elif", "query_type", "is", "QueryParser", ".", "DISJUNCTION", ":", "for", "child", "in", "query_node", ".", "children", ":", "if", "self", ".", "_WalkQueryTree", "(", "self", ".", "ExtractGlobalEq", "(", "child", ")", ",", "doc", ",", "query_field", ",", "level", "=", "level", "+", "1", ")", ":", "return", "True", "if", "query_type", "is", "QueryParser", ".", "NEGATION", ":", "self", ".", "_Debug", "(", "(", "'No such field so no match: field: %r, children: %r'", "%", "(", "query_type", ",", "query_node", ".", "children", "[", "0", "]", ")", ")", ",", "level", ")", "child", "=", "query_node", ".", "children", "[", "0", "]", "return", "not", "self", ".", "_WalkQueryTree", "(", "self", ".", "ExtractGlobalEq", "(", "child", ")", ",", "doc", ",", "query_field", ",", "level", "=", "level", "+", "1", ")", "elif", "query_type", "is", "QueryParser", ".", "HAS", ":", "if", "query_node", ".", "children", "[", "0", "]", ".", "getType", "(", ")", "is", "not", "QueryParser", ".", "GLOBAL", ":", "query_field", "=", "query_node", ".", "children", "[", "0", "]", ".", "getText", "(", ")", "if", "query_field", "not", "in", "doc", ":", "self", ".", "_Debug", "(", "(", "'No such field so no match: field: %r'", "%", "query_field", ")", ",", "level", ")", "return", "False", "return", "self", ".", "_WalkQueryTree", "(", "query_node", ".", "children", "[", "1", "]", ",", "doc", ",", "query_field", ",", "level", "=", "level", "+", "1", ")", "elif", "query_type", "is", "QueryParser", ".", "VALUE", "or", "query_type", "is", "QueryParser", ".", "TEXT", ":", "if", "query_parser", ".", "IsPhrase", "(", "query_node", ")", ":", "query_text", "=", "query_parser", ".", "GetQueryNodeTextUnicode", "(", "query_node", ")", "if", "query_field", "is", "not", "None", ":", "return", "self", ".", "_MatchField", "(", "doc", ",", "query_field", ",", "query_text", ",", "level", "=", "level", ")", "for", "field_name", "in", "doc", ":", "if", "self", ".", "_MatchField", "(", "doc", ",", "field_name", ",", "query_text", ",", "level", "=", "level", ")", ":", "return", "True", "elif", "query_type", "in", "query_parser", ".", "COMPARISON_TYPES", ":", "query_field", "=", "query_node", ".", "children", "[", "0", "]", ".", "getText", "(", ")", "query_text", "=", "query_node", ".", "children", "[", "1", "]", ".", "getText", "(", ")", "if", "query_field", "is", "not", "None", ":", "if", "query_field", "not", "in", "doc", ":", "self", ".", "_Debug", "(", "(", "'No such field so no match: field: %r'", "%", "query_field", ")", ",", "level", ")", "return", "False", "return", "self", ".", "_MatchField", "(", "doc", ",", "query_field", ",", "query_text", ",", "query_type", ",", "level", "=", "level", ")", "for", "field_name", "in", "doc", ":", "if", "self", ".", "_MatchField", "(", 
"doc", ",", "field_name", ",", "query_text", ",", "query_type", ",", "level", "=", "level", ")", ":", "return", "True", "self", ".", "_Debug", "(", "'Fallthrough at %s returning false, query_node.children: %s'", "%", "(", "query_text", ",", "[", "n", ".", "getText", "(", ")", "for", "n", "in", "query_node", ".", "children", "]", ")", ",", "level", ")", "return", "False" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/api/prospective_search/prospective_search_stub.py#L295-L363
kanzure/nanoengineer
874e4c9f8a9190f093625b267f9767e19f82e6c4
cad/src/dna/commands/DnaSegment/DnaSegment_PropertyManager.py
python
DnaSegment_PropertyManager.close
(self)
Close this property manager. Also sets the name of the self.command's structure to the one displayed in the line edit field. @see self.show() @see: DnaSegment_EditCommand.setStructureName
Close this property manager. Also sets the name of the self.command's structure to the one displayed in the line edit field.
[ "Close", "this", "property", "manager", ".", "Also", "sets", "the", "name", "of", "the", "self", ".", "command", "s", "structure", "to", "the", "one", "displayed", "in", "the", "line", "edit", "field", "." ]
def close(self): """ Close this property manager. Also sets the name of the self.command's structure to the one displayed in the line edit field. @see self.show() @see: DnaSegment_EditCommand.setStructureName """ if self.command is not None: name = str(self.nameLineEdit.text()) self.command.setStructureName(name) _superclass.close(self)
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "command", "is", "not", "None", ":", "name", "=", "str", "(", "self", ".", "nameLineEdit", ".", "text", "(", ")", ")", "self", ".", "command", ".", "setStructureName", "(", "name", ")", "_superclass", ".", "close", "(", "self", ")" ]
https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/dna/commands/DnaSegment/DnaSegment_PropertyManager.py#L182-L193
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py
python
ParserElement.searchString
( self, instring, maxMatches=_MAX_INT )
Another extension to C{L{scanString}}, simplifying the access to the tokens found to match the given parse expression. May be called with optional C{maxMatches} argument, to clip searching after 'n' matches are found. Example:: # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) # the sum() builtin can be used to merge results into a single ParseResults object print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) prints:: [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
Another extension to C{L{scanString}}, simplifying the access to the tokens found to match the given parse expression. May be called with optional C{maxMatches} argument, to clip searching after 'n' matches are found. Example:: # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
[ "Another", "extension", "to", "C", "{", "L", "{", "scanString", "}}", "simplifying", "the", "access", "to", "the", "tokens", "found", "to", "match", "the", "given", "parse", "expression", ".", "May", "be", "called", "with", "optional", "C", "{", "maxMatches", "}", "argument", "to", "clip", "searching", "after", "n", "matches", "are", "found", ".", "Example", "::", "#", "a", "capitalized", "word", "starts", "with", "an", "uppercase", "letter", "followed", "by", "zero", "or", "more", "lowercase", "letters", "cap_word", "=", "Word", "(", "alphas", ".", "upper", "()", "alphas", ".", "lower", "()", ")", "print", "(", "cap_word", ".", "searchString", "(", "More", "than", "Iron", "more", "than", "Lead", "more", "than", "Gold", "I", "need", "Electricity", "))" ]
def searchString( self, instring, maxMatches=_MAX_INT ): """ Another extension to C{L{scanString}}, simplifying the access to the tokens found to match the given parse expression. May be called with optional C{maxMatches} argument, to clip searching after 'n' matches are found. Example:: # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) # the sum() builtin can be used to merge results into a single ParseResults object print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) prints:: [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] """ try: return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc
[ "def", "searchString", "(", "self", ",", "instring", ",", "maxMatches", "=", "_MAX_INT", ")", ":", "try", ":", "return", "ParseResults", "(", "[", "t", "for", "t", ",", "s", ",", "e", "in", "self", ".", "scanString", "(", "instring", ",", "maxMatches", ")", "]", ")", "except", "ParseBaseException", "as", "exc", ":", "if", "ParserElement", ".", "verbose_stacktrace", ":", "raise", "else", ":", "# catch and re-raise exception from here, clears out pyparsing internal stack trace", "raise", "exc" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py#L1772-L1797
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppDB/appscale/datastore/scripts/datastore.py
python
MainHandler.set_default_headers
(self)
Instructs clients to close the connection after each response.
Instructs clients to close the connection after each response.
[ "Instructs", "clients", "to", "close", "the", "connection", "after", "each", "response", "." ]
def set_default_headers(self): """ Instructs clients to close the connection after each response. """ self.set_header('Connection', 'close')
[ "def", "set_default_headers", "(", "self", ")", ":", "self", ".", "set_header", "(", "'Connection'", ",", "'close'", ")" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppDB/appscale/datastore/scripts/datastore.py#L150-L152
tensorflow/lattice
784eca50cbdfedf39f183cc7d298c9fe376b69c0
tensorflow_lattice/python/pwl_calibration_sonnet_module.py
python
PWLCalibration.__init__
(self, input_keypoints, units=1, output_min=None, output_max=None, clamp_min=False, clamp_max=False, monotonicity="none", convexity="none", is_cyclic=False, kernel_init="equal_heights", impute_missing=False, missing_input_value=None, missing_output_value=None, num_projection_iterations=8, **kwargs)
Initializes an instance of `PWLCalibration`. Args: input_keypoints: Ordered list of keypoints of the piecewise linear function. Can be anything accepted by tf.convert_to_tensor(). units: Output dimension of the layer. See class comments for details. output_min: Minimum output of calibrator. output_max: Maximum output of calibrator. clamp_min: For monotonic calibrators ensures that output_min is reached. clamp_max: For monotonic calibrators ensures that output_max is reached. monotonicity: Constrains the piecewise linear function to be monotonic, using 'increasing' or 1 to indicate increasing monotonicity, 'decreasing' or -1 to indicate decreasing monotonicity and 'none' or 0 to indicate no monotonicity constraints. convexity: Constrains the piecewise linear function to be convex or concave. Convexity is indicated by 'convex' or 1, concavity is indicated by 'concave' or -1, 'none' or 0 indicates no convexity/concavity constraints. Concavity together with increasing monotonicity as well as convexity together with decreasing monotonicity results in diminishing return constraints. Consider increasing the value of `num_projection_iterations` if convexity is specified, especially with a larger number of keypoints. is_cyclic: Whether the output for the last keypoint should be identical to the output for the first keypoint. This is useful for features such as "time of day" or "degree of turn". If inputs are discrete and exactly match keypoints then is_cyclic will have an effect only if TFL regularizers are being used. kernel_init: None or one of: - String `"equal_heights"`: For pieces of the pwl function to have equal heights. - String `"equal_slopes"`: For pieces of the pwl function to have equal slopes. - Any Sonnet initializer object. If you are passing such an object, make sure that you know how this module uses the variables. impute_missing: Whether to learn an output for cases where input data is missing. If set to True, either `missing_input_value` should be initialized, or the `call()` method should get a pair of tensors. See class input shape description for more details. missing_input_value: If set, all inputs which are equal to this value will be considered as missing. Cannot be set if `impute_missing` is False. missing_output_value: If set, instead of learning output for missing inputs, simply maps them into this value. Cannot be set if `impute_missing` is False. num_projection_iterations: Number of iterations of Dykstra's projection algorithm. Constraints are strictly satisfied at the end of each update, but the update will be closer to a true L2 projection with a higher number of iterations. See `tfl.pwl_calibration_lib.project_all_constraints` for more details. **kwargs: Other args passed to `snt.Module` initializer. Raises: ValueError: If layer hyperparameters are invalid.
Initializes an instance of `PWLCalibration`.
[ "Initializes", "an", "instance", "of", "PWLCalibration", "." ]
def __init__(self, input_keypoints, units=1, output_min=None, output_max=None, clamp_min=False, clamp_max=False, monotonicity="none", convexity="none", is_cyclic=False, kernel_init="equal_heights", impute_missing=False, missing_input_value=None, missing_output_value=None, num_projection_iterations=8, **kwargs): # pyformat: disable """Initializes an instance of `PWLCalibration`. Args: input_keypoints: Ordered list of keypoints of the piecewise linear function. Can be anything accepted by tf.convert_to_tensor(). units: Output dimension of the layer. See class comments for details. output_min: Minimum output of calibrator. output_max: Maximum output of calibrator. clamp_min: For monotonic calibrators ensures that output_min is reached. clamp_max: For monotonic calibrators ensures that output_max is reached. monotonicity: Constrains the piecewise linear function to be monotonic, using 'increasing' or 1 to indicate increasing monotonicity, 'decreasing' or -1 to indicate decreasing monotonicity and 'none' or 0 to indicate no monotonicity constraints. convexity: Constrains the piecewise linear function to be convex or concave. Convexity is indicated by 'convex' or 1, concavity is indicated by 'concave' or -1, 'none' or 0 indicates no convexity/concavity constraints. Concavity together with increasing monotonicity as well as convexity together with decreasing monotonicity results in diminishing return constraints. Consider increasing the value of `num_projection_iterations` if convexity is specified, especially with a larger number of keypoints. is_cyclic: Whether the output for the last keypoint should be identical to the output for the first keypoint. This is useful for features such as "time of day" or "degree of turn". If inputs are discrete and exactly match keypoints then is_cyclic will have an effect only if TFL regularizers are being used. kernel_init: None or one of: - String `"equal_heights"`: For pieces of the pwl function to have equal heights. - String `"equal_slopes"`: For pieces of the pwl function to have equal slopes. - Any Sonnet initializer object. If you are passing such an object, make sure that you know how this module uses the variables. impute_missing: Whether to learn an output for cases where input data is missing. If set to True, either `missing_input_value` should be initialized, or the `call()` method should get a pair of tensors. See class input shape description for more details. missing_input_value: If set, all inputs which are equal to this value will be considered as missing. Cannot be set if `impute_missing` is False. missing_output_value: If set, instead of learning output for missing inputs, simply maps them into this value. Cannot be set if `impute_missing` is False. num_projection_iterations: Number of iterations of Dykstra's projection algorithm. Constraints are strictly satisfied at the end of each update, but the update will be closer to a true L2 projection with a higher number of iterations. See `tfl.pwl_calibration_lib.project_all_constraints` for more details. **kwargs: Other args passed to `snt.Module` initializer. Raises: ValueError: If layer hyperparameters are invalid. """ # pyformat: enable super(PWLCalibration, self).__init__(**kwargs) pwl_calibration_lib.verify_hyperparameters( input_keypoints=input_keypoints, output_min=output_min, output_max=output_max, monotonicity=monotonicity, convexity=convexity, is_cyclic=is_cyclic) if missing_input_value is not None and not impute_missing: raise ValueError("'missing_input_value' is specified, but " "'impute_missing' is set to False. " "'missing_input_value': " + str(missing_input_value)) if missing_output_value is not None and not impute_missing: raise ValueError("'missing_output_value' is specified, but " "'impute_missing' is set to False. " "'missing_output_value': " + str(missing_output_value)) if input_keypoints is None: raise ValueError("'input_keypoints' can't be None") if monotonicity is None: raise ValueError("'monotonicity' can't be None. Did you mean '0'?") self.input_keypoints = input_keypoints self.units = units self.output_min = output_min self.output_max = output_max self.clamp_min = clamp_min self.clamp_max = clamp_max (self._output_init_min, self._output_init_max, self._output_min_constraints, self._output_max_constraints ) = pwl_calibration_lib.convert_all_constraints(self.output_min, self.output_max, self.clamp_min, self.clamp_max) self.monotonicity = monotonicity self.convexity = convexity self.is_cyclic = is_cyclic if kernel_init == "equal_heights": self.kernel_init = _UniformOutputInitializer( output_min=self._output_init_min, output_max=self._output_init_max, monotonicity=self.monotonicity) elif kernel_init == "equal_slopes": self.kernel_init = _UniformOutputInitializer( output_min=self._output_init_min, output_max=self._output_init_max, monotonicity=self.monotonicity, keypoints=self.input_keypoints) self.impute_missing = impute_missing self.missing_input_value = missing_input_value self.missing_output_value = missing_output_value self.num_projection_iterations = num_projection_iterations
[ "def", "__init__", "(", "self", ",", "input_keypoints", ",", "units", "=", "1", ",", "output_min", "=", "None", ",", "output_max", "=", "None", ",", "clamp_min", "=", "False", ",", "clamp_max", "=", "False", ",", "monotonicity", "=", "\"none\"", ",", "convexity", "=", "\"none\"", ",", "is_cyclic", "=", "False", ",", "kernel_init", "=", "\"equal_heights\"", ",", "impute_missing", "=", "False", ",", "missing_input_value", "=", "None", ",", "missing_output_value", "=", "None", ",", "num_projection_iterations", "=", "8", ",", "*", "*", "kwargs", ")", ":", "# pyformat: disable", "# pyformat: enable", "super", "(", "PWLCalibration", ",", "self", ")", ".", "__init__", "(", "*", "*", "kwargs", ")", "pwl_calibration_lib", ".", "verify_hyperparameters", "(", "input_keypoints", "=", "input_keypoints", ",", "output_min", "=", "output_min", ",", "output_max", "=", "output_max", ",", "monotonicity", "=", "monotonicity", ",", "convexity", "=", "convexity", ",", "is_cyclic", "=", "is_cyclic", ")", "if", "missing_input_value", "is", "not", "None", "and", "not", "impute_missing", ":", "raise", "ValueError", "(", "\"'missing_input_value' is specified, but \"", "\"'impute_missing' is set to False. \"", "\"'missing_input_value': \"", "+", "str", "(", "missing_input_value", ")", ")", "if", "missing_output_value", "is", "not", "None", "and", "not", "impute_missing", ":", "raise", "ValueError", "(", "\"'missing_output_value' is specified, but \"", "\"'impute_missing' is set to False. \"", "\"'missing_output_value': \"", "+", "str", "(", "missing_output_value", ")", ")", "if", "input_keypoints", "is", "None", ":", "raise", "ValueError", "(", "\"'input_keypoints' can't be None\"", ")", "if", "monotonicity", "is", "None", ":", "raise", "ValueError", "(", "\"'monotonicity' can't be None. Did you mean '0'?\"", ")", "self", ".", "input_keypoints", "=", "input_keypoints", "self", ".", "units", "=", "units", "self", ".", "output_min", "=", "output_min", "self", ".", "output_max", "=", "output_max", "self", ".", "clamp_min", "=", "clamp_min", "self", ".", "clamp_max", "=", "clamp_max", "(", "self", ".", "_output_init_min", ",", "self", ".", "_output_init_max", ",", "self", ".", "_output_min_constraints", ",", "self", ".", "_output_max_constraints", ")", "=", "pwl_calibration_lib", ".", "convert_all_constraints", "(", "self", ".", "output_min", ",", "self", ".", "output_max", ",", "self", ".", "clamp_min", ",", "self", ".", "clamp_max", ")", "self", ".", "monotonicity", "=", "monotonicity", "self", ".", "convexity", "=", "convexity", "self", ".", "is_cyclic", "=", "is_cyclic", "if", "kernel_init", "==", "\"equal_heights\"", ":", "self", ".", "kernel_init", "=", "_UniformOutputInitializer", "(", "output_min", "=", "self", ".", "_output_init_min", ",", "output_max", "=", "self", ".", "_output_init_max", ",", "monotonicity", "=", "self", ".", "monotonicity", ")", "elif", "kernel_init", "==", "\"equal_slopes\"", ":", "self", ".", "kernel_init", "=", "_UniformOutputInitializer", "(", "output_min", "=", "self", ".", "_output_init_min", ",", "output_max", "=", "self", ".", "_output_init_max", ",", "monotonicity", "=", "self", ".", "monotonicity", ",", "keypoints", "=", "self", ".", "input_keypoints", ")", "self", ".", "impute_missing", "=", "impute_missing", "self", ".", "missing_input_value", "=", "missing_input_value", "self", ".", "missing_output_value", "=", "missing_output_value", "self", ".", "num_projection_iterations", "=", "num_projection_iterations" ]
https://github.com/tensorflow/lattice/blob/784eca50cbdfedf39f183cc7d298c9fe376b69c0/tensorflow_lattice/python/pwl_calibration_sonnet_module.py#L88-L214
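Example (a hedged usage sketch for the record above, not part of the dataset; the import path `tensorflow_lattice.sonnet_modules` and the call pattern are assumptions based on standard Sonnet module usage)::

    # Assumes tensorflow, numpy, and tensorflow_lattice are installed.
    import numpy as np
    import tensorflow as tf
    from tensorflow_lattice import sonnet_modules as tfl_snt  # import path is an assumption

    # Five fixed keypoints on [0, 1]; outputs clamped to [0, 1] and
    # constrained to be monotonically increasing.
    calibrator = tfl_snt.PWLCalibration(
        input_keypoints=np.linspace(0.0, 1.0, num=5),
        output_min=0.0,
        output_max=1.0,
        clamp_min=True,
        clamp_max=True,
        monotonicity='increasing',
    )

    x = tf.constant([[0.1], [0.5], [0.9]])  # shape (batch_size, 1)
    y = calibrator(x)  # piecewise-linearly calibrated values, same shape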
googlearchive/simian
fb9c43946ff7ba29be417068d6447cfc0adfe9ef
src/simian/mac/models/settings.py
python
Settings.SetItem
(cls, name, value)
Set an item into settings. If the item belongs in a serialized container it will be serialized before storage. Args: name: str, like 'ca_public_cert_pem' value: str, value
Set an item into settings.
[ "Set", "an", "item", "into", "settings", "." ]
def SetItem(cls, name, value): """Set an item into settings. If the item belongs in a serialized container it will be serialized before storage. Args: name: str, like 'ca_public_cert_pem' value: str, value """ if Settings.GetType(name) in ['pem', 'string', 'random_str']: return super(Settings, cls).SetItem(name, value) else: return cls.SetSerializedItem(name, value)
[ "def", "SetItem", "(", "cls", ",", "name", ",", "value", ")", ":", "if", "Settings", ".", "GetType", "(", "name", ")", "in", "[", "'pem'", ",", "'string'", ",", "'random_str'", "]", ":", "return", "super", "(", "Settings", ",", "cls", ")", ".", "SetItem", "(", "name", ",", "value", ")", "else", ":", "return", "cls", ".", "SetSerializedItem", "(", "name", ",", "value", ")" ]
https://github.com/googlearchive/simian/blob/fb9c43946ff7ba29be417068d6447cfc0adfe9ef/src/simian/mac/models/settings.py#L301-L314
wistbean/fxxkpython
88e16d79d8dd37236ba6ecd0d0ff11d63143968c
vip/qyxuan/projects/Snake/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/util.py
python
FileOperator.newer
(self, source, target)
return os.stat(source).st_mtime > os.stat(target).st_mtime
Tell if the target is newer than the source. Returns true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Returns false if both exist and 'target' is the same age or younger than 'source'. Raise DistlibException if 'source' does not exist. Note that this test is not very accurate: files created in the same second will have the same "age".
Tell if the target is newer than the source.
[ "Tell", "if", "the", "target", "is", "newer", "than", "the", "source", "." ]
def newer(self, source, target): """Tell if the target is newer than the source. Returns true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Returns false if both exist and 'target' is the same age or younger than 'source'. Raise DistlibException if 'source' does not exist. Note that this test is not very accurate: files created in the same second will have the same "age". """ if not os.path.exists(source): raise DistlibException("file '%r' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return True return os.stat(source).st_mtime > os.stat(target).st_mtime
[ "def", "newer", "(", "self", ",", "source", ",", "target", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "source", ")", ":", "raise", "DistlibException", "(", "\"file '%r' does not exist\"", "%", "os", ".", "path", ".", "abspath", "(", "source", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "target", ")", ":", "return", "True", "return", "os", ".", "stat", "(", "source", ")", ".", "st_mtime", ">", "os", ".", "stat", "(", "target", ")", ".", "st_mtime" ]
https://github.com/wistbean/fxxkpython/blob/88e16d79d8dd37236ba6ecd0d0ff11d63143968c/vip/qyxuan/projects/Snake/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/util.py#L493-L511
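Example (a short usage sketch, not part of the dataset; the file paths are hypothetical, and the import assumes the standalone `distlib` package rather than the vendored copy at the path above)::

    from distlib.util import FileOperator

    op = FileOperator()
    # True when the target is missing or older than the source;
    # raises DistlibException if 'src/main.c' itself does not exist.
    if op.newer('src/main.c', 'build/main.o'):
        print('target is stale; rebuild needed')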
openstack/python-novaclient
63d368168c87bc0b9a9b7928b42553c609e46089
novaclient/utils.py
python
flatten_dict
(data)
return dict(_flatten(data))
Return a new dict whose sub-dicts have been merged into the original. Each of the parent's keys is prepended to the child's to prevent collisions. Any string elements will be JSON parsed before flattening. >>> flatten_dict({'service': {'host':'cloud9@compute-068', 'id': 143}}) {'service_host': 'cloud9@compute-068', 'service_id': 143}
Return a new dict whose sub-dicts have been merged into the original. Each of the parent's keys is prepended to the child's to prevent collisions. Any string elements will be JSON parsed before flattening.
[ "Return", "a", "new", "dict", "whose", "sub", "-", "dicts", "have", "been", "merged", "into", "the", "original", ".", "Each", "of", "the", "parent's", "keys", "is", "prepended", "to", "the", "child", "s", "to", "prevent", "collisions", ".", "Any", "string", "elements", "will", "be", "JSON", "parsed", "before", "flattening", "." ]
def flatten_dict(data): """Return a new dict whose sub-dicts have been merged into the original. Each of the parent's keys is prepended to the child's to prevent collisions. Any string elements will be JSON parsed before flattening. >>> flatten_dict({'service': {'host':'cloud9@compute-068', 'id': 143}}) {'service_host': 'cloud9@compute-068', 'service_id': 143} """ data = data.copy() # Try and decode any nested JSON structures. for key, value in data.items(): if isinstance(value, str): try: data[key] = jsonutils.loads(value) except ValueError: pass return dict(_flatten(data))
[ "def", "flatten_dict", "(", "data", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "# Try and decode any nested JSON structures.", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "try", ":", "data", "[", "key", "]", "=", "jsonutils", ".", "loads", "(", "value", ")", "except", "ValueError", ":", "pass", "return", "dict", "(", "_flatten", "(", "data", ")", ")" ]
https://github.com/openstack/python-novaclient/blob/63d368168c87bc0b9a9b7928b42553c609e46089/novaclient/utils.py#L184-L203
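Example (a runnable sketch, not part of the dataset, including the JSON-decoding step the docstring mentions; the `stats` key and its expected flattening are illustrative assumptions based on the underscore-joined keys shown in the doctest)::

    from novaclient import utils

    nested = {
        'service': {'host': 'cloud9@compute-068', 'id': 143},
        'stats': '{"vcpus": 4}',  # JSON strings are decoded before flattening
    }
    flat = utils.flatten_dict(nested)
    # expected (assuming underscore-joined keys as in the doctest):
    # {'service_host': 'cloud9@compute-068', 'service_id': 143,
    #  'stats_vcpus': 4}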
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/wss/v20180426/models.py
python
DeleteCertResponse.__init__
(self)
r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
[ "r", ":", "param", "RequestId", ":", "唯一请求", "ID,每次请求都会返回。定位问题时需要提供该次请求的", "RequestId。", ":", "type", "RequestId", ":", "str" ]
def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "RequestId", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/wss/v20180426/models.py#L54-L59
m-labs/artiq
eaa1505c947c7987cdbd31c24056823c740e84e0
artiq/coredevice/ad9910.py
python
AD9910.measure_io_update_alignment
(self, delay_start: TInt64, delay_stop: TInt64)
return ftw & 1
Use the digital ramp generator to locate the alignment between IO_UPDATE and SYNC_CLK. The ramp generator is set up to a linear frequency ramp (dFTW/t_SYNC_CLK=1) and started at a coarse RTIO time stamp plus `delay_start` and stopped at a coarse RTIO time stamp plus `delay_stop`. :param delay_start: Start IO_UPDATE delay in machine units. :param delay_stop: Stop IO_UPDATE delay in machine units. :return: Odd/even SYNC_CLK cycle indicator.
Use the digital ramp generator to locate the alignment between IO_UPDATE and SYNC_CLK.
[ "Use", "the", "digital", "ramp", "generator", "to", "locate", "the", "alignment", "between", "IO_UPDATE", "and", "SYNC_CLK", "." ]
def measure_io_update_alignment(self, delay_start: TInt64, delay_stop: TInt64) -> TInt32: """Use the digital ramp generator to locate the alignment between IO_UPDATE and SYNC_CLK. The ramp generator is set up to a linear frequency ramp (dFTW/t_SYNC_CLK=1) and started at a coarse RTIO time stamp plus `delay_start` and stopped at a coarse RTIO time stamp plus `delay_stop`. :param delay_start: Start IO_UPDATE delay in machine units. :param delay_stop: Stop IO_UPDATE delay in machine units. :return: Odd/even SYNC_CLK cycle indicator. """ # set up DRG self.set_cfr1(drg_load_lrr=1, drg_autoclear=1) # DRG -> FTW, DRG enable self.set_cfr2(drg_enable=1) # no limits self.write64(_AD9910_REG_RAMP_LIMIT, -1, 0) # DRCTL=0, dt=1 t_SYNC_CLK self.write32(_AD9910_REG_RAMP_RATE, 0x00010000) # dFTW = 1, (work around negative slope) self.write64(_AD9910_REG_RAMP_STEP, -1, 0) # delay io_update after RTIO edge t = now_mu() + 8 & ~7 at_mu(t + delay_start) # assumes a maximum t_SYNC_CLK period self.cpld.io_update.pulse_mu(16 - delay_start) # realign # disable DRG autoclear and LRR on io_update self.set_cfr1() # stop DRG self.write64(_AD9910_REG_RAMP_STEP, 0, 0) at_mu(t + 0x1000 + delay_stop) self.cpld.io_update.pulse_mu(16 - delay_stop) # realign ftw = self.read32(_AD9910_REG_FTW) # read out effective FTW delay(100 * us) # slack # disable DRG self.set_cfr2(drg_enable=0) self.cpld.io_update.pulse_mu(8) return ftw & 1
[ "def", "measure_io_update_alignment", "(", "self", ",", "delay_start", ":", "TInt64", ",", "delay_stop", ":", "TInt64", ")", "->", "TInt32", ":", "# set up DRG", "self", ".", "set_cfr1", "(", "drg_load_lrr", "=", "1", ",", "drg_autoclear", "=", "1", ")", "# DRG -> FTW, DRG enable", "self", ".", "set_cfr2", "(", "drg_enable", "=", "1", ")", "# no limits", "self", ".", "write64", "(", "_AD9910_REG_RAMP_LIMIT", ",", "-", "1", ",", "0", ")", "# DRCTL=0, dt=1 t_SYNC_CLK", "self", ".", "write32", "(", "_AD9910_REG_RAMP_RATE", ",", "0x00010000", ")", "# dFTW = 1, (work around negative slope)", "self", ".", "write64", "(", "_AD9910_REG_RAMP_STEP", ",", "-", "1", ",", "0", ")", "# delay io_update after RTIO edge", "t", "=", "now_mu", "(", ")", "+", "8", "&", "~", "7", "at_mu", "(", "t", "+", "delay_start", ")", "# assumes a maximum t_SYNC_CLK period", "self", ".", "cpld", ".", "io_update", ".", "pulse_mu", "(", "16", "-", "delay_start", ")", "# realign", "# disable DRG autoclear and LRR on io_update", "self", ".", "set_cfr1", "(", ")", "# stop DRG", "self", ".", "write64", "(", "_AD9910_REG_RAMP_STEP", ",", "0", ",", "0", ")", "at_mu", "(", "t", "+", "0x1000", "+", "delay_stop", ")", "self", ".", "cpld", ".", "io_update", ".", "pulse_mu", "(", "16", "-", "delay_stop", ")", "# realign", "ftw", "=", "self", ".", "read32", "(", "_AD9910_REG_FTW", ")", "# read out effective FTW", "delay", "(", "100", "*", "us", ")", "# slack", "# disable DRG", "self", ".", "set_cfr2", "(", "drg_enable", "=", "0", ")", "self", ".", "cpld", ".", "io_update", ".", "pulse_mu", "(", "8", ")", "return", "ftw", "&", "1" ]
https://github.com/m-labs/artiq/blob/eaa1505c947c7987cdbd31c24056823c740e84e0/artiq/coredevice/ad9910.py#L1032-L1072
BotBotMe/botbot-web
0ada6213b5f1d8bb0f71eb79aaf37704f4903564
botbot/apps/plugins/runner.py
python
PluginRunner.check_for_plugin_route_matches
(self, line, router)
Checks the active plugins' routes and calls functions on matches
Checks the active plugins' routes and calls functions on matches
[ "Checks", "the", "active", "plugins", "routes", "and", "calls", "functions", "on", "matches" ]
def check_for_plugin_route_matches(self, line, router): """Checks the active plugins' routes and calls functions on matches""" # get the active routes for this channel active_slugs = line._active_plugin_slugs.intersection(router.viewkeys()) for plugin_slug in active_slugs: for rule, func, plugin in router[plugin_slug]: match = re.match(rule, line.text, re.IGNORECASE) if match: LOG.info('Match: %s.%s', plugin_slug, func.__name__) with statsd.timer(".".join(["plugins", plugin_slug])): # FIXME: This will not have correct timing if go back to # gevent. # Instantiate a plugin specific to this channel channel_plugin = self.setup_plugin_for_channel( plugin.__class__, line) # get the method from the channel-specific plugin new_func = log_on_error(LOG, getattr(channel_plugin, func.__name__)) if hasattr(self, 'gevent'): grnlt = self.gevent.Greenlet(new_func, line, **match.groupdict()) grnlt.link_value(channel_plugin.greenlet_respond) grnlt.start() else: channel_plugin.respond(new_func(line, **match.groupdict()))
[ "def", "check_for_plugin_route_matches", "(", "self", ",", "line", ",", "router", ")", ":", "# get the active routes for this channel", "active_slugs", "=", "line", ".", "_active_plugin_slugs", ".", "intersection", "(", "router", ".", "viewkeys", "(", ")", ")", "for", "plugin_slug", "in", "active_slugs", ":", "for", "rule", ",", "func", ",", "plugin", "in", "router", "[", "plugin_slug", "]", ":", "match", "=", "re", ".", "match", "(", "rule", ",", "line", ".", "text", ",", "re", ".", "IGNORECASE", ")", "if", "match", ":", "LOG", ".", "info", "(", "'Match: %s.%s'", ",", "plugin_slug", ",", "func", ".", "__name__", ")", "with", "statsd", ".", "timer", "(", "\".\"", ".", "join", "(", "[", "\"plugins\"", ",", "plugin_slug", "]", ")", ")", ":", "# FIXME: This will not have correct timing if go back to", "# gevent.", "# Instantiate a plugin specific to this channel", "channel_plugin", "=", "self", ".", "setup_plugin_for_channel", "(", "plugin", ".", "__class__", ",", "line", ")", "# get the method from the channel-specific plugin", "new_func", "=", "log_on_error", "(", "LOG", ",", "getattr", "(", "channel_plugin", ",", "func", ".", "__name__", ")", ")", "if", "hasattr", "(", "self", ",", "'gevent'", ")", ":", "grnlt", "=", "self", ".", "gevent", ".", "Greenlet", "(", "new_func", ",", "line", ",", "*", "*", "match", ".", "groupdict", "(", ")", ")", "grnlt", ".", "link_value", "(", "channel_plugin", ".", "greenlet_respond", ")", "grnlt", ".", "start", "(", ")", "else", ":", "channel_plugin", ".", "respond", "(", "new_func", "(", "line", ",", "*", "*", "match", ".", "groupdict", "(", ")", ")", ")" ]
https://github.com/BotBotMe/botbot-web/blob/0ada6213b5f1d8bb0f71eb79aaf37704f4903564/botbot/apps/plugins/runner.py#L263-L288
readbeyond/aeneas
4d200a050690903b30b3d885b44714fecb23f18a
aeneas/adjustboundaryalgorithm.py
python
AdjustBoundaryAlgorithm._process_zero_length
(self, nozero, allow_arbitrary_shift)
If ``nozero`` is ``True``, modify the sync map fragment list so that no fragment will have zero length.
If ``nozero`` is ``True``, modify the sync map fragment list so that no fragment will have zero length.
[ "If", "nozero", "is", "True", "modify", "the", "sync", "map", "fragment", "list", "so", "that", "no", "fragment", "will", "have", "zero", "length", "." ]
def _process_zero_length(self, nozero, allow_arbitrary_shift): """ If ``nozero`` is ``True``, modify the sync map fragment list so that no fragment will have zero length. """ self.log(u"Called _process_zero_length") if not nozero: self.log(u"Processing zero length intervals not requested: returning") return self.log(u"Processing zero length intervals requested") self.log(u" Checking and fixing...") duration = self.rconf[RuntimeConfiguration.ABA_NO_ZERO_DURATION] self.log([u" Requested no zero duration: %.3f", duration]) if not allow_arbitrary_shift: self.log(u" No arbitrary shift => taking max with mws") duration = self.rconf.mws.geq_multiple(duration) self.log([u" Actual no zero duration: %.3f", duration]) # ignore HEAD and TAIL max_index = len(self.smflist) - 1 self.smflist.fix_zero_length_fragments( duration=duration, min_index=1, max_index=max_index ) self.log(u" Checking and fixing... done") if self.smflist.has_zero_length_fragments(1, max_index): self.log_warn(u" The fragment list still has fragments with zero length") else: self.log(u" The fragment list does not have fragments with zero length")
[ "def", "_process_zero_length", "(", "self", ",", "nozero", ",", "allow_arbitrary_shift", ")", ":", "self", ".", "log", "(", "u\"Called _process_zero_length\"", ")", "if", "not", "nozero", ":", "self", ".", "log", "(", "u\"Processing zero length intervals not requested: returning\"", ")", "return", "self", ".", "log", "(", "u\"Processing zero length intervals requested\"", ")", "self", ".", "log", "(", "u\" Checking and fixing...\"", ")", "duration", "=", "self", ".", "rconf", "[", "RuntimeConfiguration", ".", "ABA_NO_ZERO_DURATION", "]", "self", ".", "log", "(", "[", "u\" Requested no zero duration: %.3f\"", ",", "duration", "]", ")", "if", "not", "allow_arbitrary_shift", ":", "self", ".", "log", "(", "u\" No arbitrary shift => taking max with mws\"", ")", "duration", "=", "self", ".", "rconf", ".", "mws", ".", "geq_multiple", "(", "duration", ")", "self", ".", "log", "(", "[", "u\" Actual no zero duration: %.3f\"", ",", "duration", "]", ")", "# ignore HEAD and TAIL", "max_index", "=", "len", "(", "self", ".", "smflist", ")", "-", "1", "self", ".", "smflist", ".", "fix_zero_length_fragments", "(", "duration", "=", "duration", ",", "min_index", "=", "1", ",", "max_index", "=", "max_index", ")", "self", ".", "log", "(", "u\" Checking and fixing... done\"", ")", "if", "self", ".", "smflist", ".", "has_zero_length_fragments", "(", "1", ",", "max_index", ")", ":", "self", ".", "log_warn", "(", "u\" The fragment list still has fragments with zero length\"", ")", "else", ":", "self", ".", "log", "(", "u\" The fragment list does not have fragments with zero length\"", ")" ]
https://github.com/readbeyond/aeneas/blob/4d200a050690903b30b3d885b44714fecb23f18a/aeneas/adjustboundaryalgorithm.py#L407-L435
gem/oq-engine
1bdb88f3914e390abcbd285600bfd39477aae47c
openquake/hazardlib/gsim/yenier_atkinson_2015.py
python
_get_f_z
(C, imt, rrup, m)
return np.log(z) + (C['b3'] + C['b4']*m)*np.log(ratio_b)
Implements eq. 7 and eq. 8 at page 1991
Implements eq. 7 and eq. 8 at page 1991
[ "Implements", "eq", ".", "7", "and", "eq", ".", "8", "at", "page", "1991" ]
def _get_f_z(C, imt, rrup, m): """ Implements eq. 7 and eq. 8 at page 1991 """ # Pseudo depth - see eq. 6 at page 1991 pseudo_depth = 10**(-0.405+0.235*m) # Effective distance - see eq. 5 at page 1991 reff = (rrup**2+pseudo_depth**2)**0.5 # The transition_distance is 50 km as defined just below eq. 8 transition_dst = 50. # Geometrical spreading rates b1 = -1.3 b2 = -0.5 # Geometrical attenuation z = reff**b1 ratio_a = reff / transition_dst z[reff > transition_dst] = (transition_dst**b1 * (ratio_a[reff > transition_dst])**b2) # Compute geometrical spreading function ratio_b = reff / (1.+pseudo_depth**2)**0.5 return np.log(z) + (C['b3'] + C['b4']*m)*np.log(ratio_b)
[ "def", "_get_f_z", "(", "C", ",", "imt", ",", "rrup", ",", "m", ")", ":", "# Pseudo depth - see eq. 6 at page 1991", "pseudo_depth", "=", "10", "**", "(", "-", "0.405", "+", "0.235", "*", "m", ")", "# Effective distance - see eq. 5 at page 1991", "reff", "=", "(", "rrup", "**", "2", "+", "pseudo_depth", "**", "2", ")", "**", "0.5", "# The transition_distance is 50 km as defined just below eq. 8", "transition_dst", "=", "50.", "# Geometrical spreading rates", "b1", "=", "-", "1.3", "b2", "=", "-", "0.5", "# Geometrical attenuation", "z", "=", "reff", "**", "b1", "ratio_a", "=", "reff", "/", "transition_dst", "z", "[", "reff", ">", "transition_dst", "]", "=", "(", "transition_dst", "**", "b1", "*", "(", "ratio_a", "[", "reff", ">", "transition_dst", "]", ")", "**", "b2", ")", "# Compute geometrical spreading function", "ratio_b", "=", "reff", "/", "(", "1.", "+", "pseudo_depth", "**", "2", ")", "**", "0.5", "return", "np", ".", "log", "(", "z", ")", "+", "(", "C", "[", "'b3'", "]", "+", "C", "[", "'b4'", "]", "*", "m", ")", "*", "np", ".", "log", "(", "ratio_b", ")" ]
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/hazardlib/gsim/yenier_atkinson_2015.py#L155-L175
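Example (a self-contained numeric sketch, not part of the dataset, of eqs. 5-8 implemented above, reduced to scalar inputs; the coefficient values `b3` and `b4` stand in for the GSIM's coefficient table and are purely illustrative)::

    import numpy as np

    def f_z_scalar(rrup, m, b3=0.0, b4=0.0):
        pseudo_depth = 10 ** (-0.405 + 0.235 * m)        # eq. 6
        reff = (rrup ** 2 + pseudo_depth ** 2) ** 0.5    # eq. 5
        transition_dst = 50.0                            # defined just below eq. 8
        b1, b2 = -1.3, -0.5                              # geometrical spreading rates
        if reff <= transition_dst:                       # eq. 8: piecewise attenuation
            z = reff ** b1
        else:
            z = transition_dst ** b1 * (reff / transition_dst) ** b2
        ratio_b = reff / (1.0 + pseudo_depth ** 2) ** 0.5
        return np.log(z) + (b3 + b4 * m) * np.log(ratio_b)  # eq. 7

    print(f_z_scalar(rrup=10.0, m=6.0))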
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/pickle.py
python
Unpickler.load_long
(self)
[]
def load_long(self): self.append(long(self.readline()[:-1], 0))
[ "def", "load_long", "(", "self", ")", ":", "self", ".", "append", "(", "long", "(", "self", ".", "readline", "(", ")", "[", ":", "-", "1", "]", ",", "0", ")", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/pickle.py#L937-L938
mit-han-lab/lite-transformer
1df8001c779deb85819fc30d70349cc334c408ba
fairseq/trainer.py
python
Trainer.dummy_train_step
(self, dummy_batch)
Dummy training step for warming caching allocator.
Dummy training step for warming caching allocator.
[ "Dummy", "training", "step", "for", "warming", "caching", "allocator", "." ]
def dummy_train_step(self, dummy_batch): """Dummy training step for warming caching allocator.""" self.train_step(dummy_batch, dummy_batch=True) self.zero_grad()
[ "def", "dummy_train_step", "(", "self", ",", "dummy_batch", ")", ":", "self", ".", "train_step", "(", "dummy_batch", ",", "dummy_batch", "=", "True", ")", "self", ".", "zero_grad", "(", ")" ]
https://github.com/mit-han-lab/lite-transformer/blob/1df8001c779deb85819fc30d70349cc334c408ba/fairseq/trainer.py#L527-L530
quantumlib/Cirq
89f88b01d69222d3f1ec14d649b7b3a85ed9211f
cirq-google/cirq_google/api/v1/params.py
python
_to_zip_product
(sweep: cirq.Sweep)
return sweep
Converts sweep to a product of zips of single sweeps, if possible.
Converts sweep to a product of zips of single sweeps, if possible.
[ "Converts", "sweep", "to", "a", "product", "of", "zips", "of", "single", "sweeps", "if", "possible", "." ]
def _to_zip_product(sweep: cirq.Sweep) -> cirq.Product: """Converts sweep to a product of zips of single sweeps, if possible.""" if not isinstance(sweep, cirq.Product): sweep = cirq.Product(sweep) if not all(isinstance(f, cirq.Zip) for f in sweep.factors): factors = [f if isinstance(f, cirq.Zip) else cirq.Zip(f) for f in sweep.factors] sweep = cirq.Product(*factors) for factor in sweep.factors: for term in cast(cirq.Zip, factor).sweeps: if not isinstance(term, sweeps.SingleSweep): raise ValueError(f'cannot convert to zip-product form: {sweep}') return sweep
[ "def", "_to_zip_product", "(", "sweep", ":", "cirq", ".", "Sweep", ")", "->", "cirq", ".", "Product", ":", "if", "not", "isinstance", "(", "sweep", ",", "cirq", ".", "Product", ")", ":", "sweep", "=", "cirq", ".", "Product", "(", "sweep", ")", "if", "not", "all", "(", "isinstance", "(", "f", ",", "cirq", ".", "Zip", ")", "for", "f", "in", "sweep", ".", "factors", ")", ":", "factors", "=", "[", "f", "if", "isinstance", "(", "f", ",", "cirq", ".", "Zip", ")", "else", "cirq", ".", "Zip", "(", "f", ")", "for", "f", "in", "sweep", ".", "factors", "]", "sweep", "=", "cirq", ".", "Product", "(", "*", "factors", ")", "for", "factor", "in", "sweep", ".", "factors", ":", "for", "term", "in", "cast", "(", "cirq", ".", "Zip", ",", "factor", ")", ".", "sweeps", ":", "if", "not", "isinstance", "(", "term", ",", "sweeps", ".", "SingleSweep", ")", ":", "raise", "ValueError", "(", "f'cannot convert to zip-product form: {sweep}'", ")", "return", "sweep" ]
https://github.com/quantumlib/Cirq/blob/89f88b01d69222d3f1ec14d649b7b3a85ed9211f/cirq-google/cirq_google/api/v1/params.py#L33-L44
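Example (a small sketch, not part of the dataset, of the normalization `_to_zip_product` performs; importing the module-private helper directly is for illustration only)::

    import sympy
    import cirq
    from cirq_google.api.v1.params import _to_zip_product  # private helper

    t = sympy.Symbol('t')
    single = cirq.Linspace(t, start=0, stop=1, length=5)

    norm = _to_zip_product(single)
    # norm is cirq.Product(cirq.Zip(single)); a factor that is not a Zip
    # of single sweeps raises ValueError instead.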
Yelp/bravado-core
382db874b7b838dcfd169b0ce490d6a447ad6ff2
bravado_core/validate.py
python
validate_object
( swagger_spec, # type: Spec object_spec, # type: JSONDict value, # type: typing.Any )
:type swagger_spec: :class:`bravado_core.spec.Spec` :param object_spec: spec for an 'object' type in dict form :type value: dict
:type swagger_spec: :class:`bravado_core.spec.Spec` :param object_spec: spec for an 'object' type in dict form :type value: dict
[ ":", "type", "swagger_spec", ":", ":", "class", ":", "bravado_core", ".", "spec", ".", "Spec", ":", "param", "object_spec", ":", "spec", "for", "an", "object", "type", "in", "dict", "form", ":", "type", "value", ":", "dict" ]
def validate_object( swagger_spec, # type: Spec object_spec, # type: JSONDict value, # type: typing.Any ): # type: (...) -> None """ :type swagger_spec: :class:`bravado_core.spec.Spec` :param object_spec: spec for an 'object' type in dict form :type value: dict """ get_validator_type(swagger_spec=swagger_spec)( object_spec, format_checker=swagger_spec.format_checker, resolver=swagger_spec.resolver, ).validate(value)
[ "def", "validate_object", "(", "swagger_spec", ",", "# type: Spec", "object_spec", ",", "# type: JSONDict", "value", ",", "# type: typing.Any", ")", ":", "# type: (...) -> None", "get_validator_type", "(", "swagger_spec", "=", "swagger_spec", ")", "(", "object_spec", ",", "format_checker", "=", "swagger_spec", ".", "format_checker", ",", "resolver", "=", "swagger_spec", ".", "resolver", ",", ")", ".", "validate", "(", "value", ")" ]
https://github.com/Yelp/bravado-core/blob/382db874b7b838dcfd169b0ce490d6a447ad6ff2/bravado_core/validate.py#L124-L139
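Example (a hedged usage sketch, not part of the dataset: the minimal Swagger dict and the pet object are invented for illustration, and `Spec.from_dict` is bravado-core's documented way to build a `Spec`)::

    from bravado_core.spec import Spec
    from bravado_core.validate import validate_object

    minimal_swagger = {
        'swagger': '2.0',
        'info': {'title': 'example', 'version': '1.0'},
        'paths': {},
    }
    swagger_spec = Spec.from_dict(minimal_swagger)

    object_spec = {
        'type': 'object',
        'required': ['name'],
        'properties': {'name': {'type': 'string'}},
    }
    validate_object(swagger_spec, object_spec, {'name': 'pet'})   # passes
    # validate_object(swagger_spec, object_spec, {})  # missing 'name' -> raises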
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/setuptools/glibc.py
python
glibc_version_string
()
return version_str
Returns glibc version string, or None if not using glibc.
Returns glibc version string, or None if not using glibc.
[ "Returns", "glibc", "version", "string", "or", "None", "if", "not", "using", "glibc", "." ]
def glibc_version_string(): "Returns glibc version string, or None if not using glibc." # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen # manpage says, "If filename is NULL, then the returned handle is for the # main program". This way we can let the linker do the work to figure out # which libc our process is actually using. process_namespace = ctypes.CDLL(None) try: gnu_get_libc_version = process_namespace.gnu_get_libc_version except AttributeError: # Symbol doesn't exist -> therefore, we are not linked to # glibc. return None # Call gnu_get_libc_version, which returns a string like "2.5" gnu_get_libc_version.restype = ctypes.c_char_p version_str = gnu_get_libc_version() # py2 / py3 compatibility: if not isinstance(version_str, str): version_str = version_str.decode("ascii") return version_str
[ "def", "glibc_version_string", "(", ")", ":", "# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen", "# manpage says, \"If filename is NULL, then the returned handle is for the", "# main program\". This way we can let the linker do the work to figure out", "# which libc our process is actually using.", "process_namespace", "=", "ctypes", ".", "CDLL", "(", "None", ")", "try", ":", "gnu_get_libc_version", "=", "process_namespace", ".", "gnu_get_libc_version", "except", "AttributeError", ":", "# Symbol doesn't exist -> therefore, we are not linked to", "# glibc.", "return", "None", "# Call gnu_get_libc_version, which returns a string like \"2.5\"", "gnu_get_libc_version", ".", "restype", "=", "ctypes", ".", "c_char_p", "version_str", "=", "gnu_get_libc_version", "(", ")", "# py2 / py3 compatibility:", "if", "not", "isinstance", "(", "version_str", ",", "str", ")", ":", "version_str", "=", "version_str", ".", "decode", "(", "\"ascii\"", ")", "return", "version_str" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/setuptools/glibc.py#L10-L32
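Example (a usage sketch, not part of the dataset; the import path matches the vendored location in the record above, and the printed strings are illustrative)::

    from setuptools.glibc import glibc_version_string

    version = glibc_version_string()
    if version is None:
        print('not linked against glibc (e.g. musl, macOS, Windows)')
    else:
        major, minor = version.split('.')[:2]
        print('glibc %s.%s' % (major, minor))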
psd-tools/psd-tools
00241f3aed2ca52a8012e198a0f390ff7d8edca9
src/psd_tools/api/layers.py
python
Layer.clip_layers
(self)
return self._clip_layers
Clip layers associated with this layer. To compose clipping layers:: from psd_tools import compose clip_mask = compose(layer.clip_layers) :return: list of layers
Clip layers associated with this layer.
[ "Clip", "layers", "associated", "with", "this", "layer", "." ]
def clip_layers(self): """ Clip layers associated with this layer. To compose clipping layers:: from psd_tools import compose clip_mask = compose(layer.clip_layers) :return: list of layers """ return self._clip_layers
[ "def", "clip_layers", "(", "self", ")", ":", "return", "self", ".", "_clip_layers" ]
https://github.com/psd-tools/psd-tools/blob/00241f3aed2ca52a8012e198a0f390ff7d8edca9/src/psd_tools/api/layers.py#L429-L440
openstack/cinder
23494a6d6c51451688191e1847a458f1d3cdcaa5
cinder/volume/drivers/hitachi/hbsd_rest.py
python
HBSDREST.has_snap_pair
(self, pvol, svol)
return (result[0]['primaryOrSecondary'] == "S-VOL" and int(result[0]['pvolLdevId']) == pvol)
Check if the volume has a snapshot pair.
Check if the volume has a snapshot pair.
[ "Check", "if", "the", "volume", "has", "a", "snapshot", "pair", "." ]
def has_snap_pair(self, pvol, svol): """Check if the volume has a snapshot pair.""" ldev_info = self.get_ldev_info(['status', 'attributes'], svol) if (ldev_info['status'] != NORMAL_STS or PAIR_ATTR not in ldev_info['attributes']): return False params_s = {"svolLdevId": svol} result = self.client.get_snapshots(params_s) if not result: return False return (result[0]['primaryOrSecondary'] == "S-VOL" and int(result[0]['pvolLdevId']) == pvol)
[ "def", "has_snap_pair", "(", "self", ",", "pvol", ",", "svol", ")", ":", "ldev_info", "=", "self", ".", "get_ldev_info", "(", "[", "'status'", ",", "'attributes'", "]", ",", "svol", ")", "if", "(", "ldev_info", "[", "'status'", "]", "!=", "NORMAL_STS", "or", "PAIR_ATTR", "not", "in", "ldev_info", "[", "'attributes'", "]", ")", ":", "return", "False", "params_s", "=", "{", "\"svolLdevId\"", ":", "svol", "}", "result", "=", "self", ".", "client", ".", "get_snapshots", "(", "params_s", ")", "if", "not", "result", ":", "return", "False", "return", "(", "result", "[", "0", "]", "[", "'primaryOrSecondary'", "]", "==", "\"S-VOL\"", "and", "int", "(", "result", "[", "0", "]", "[", "'pvolLdevId'", "]", ")", "==", "pvol", ")" ]
https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/volume/drivers/hitachi/hbsd_rest.py#L785-L796
HypothesisWorks/hypothesis
d1bfc4acc86899caa7a40f892322e1a69fbf36f4
hypothesis-python/src/hypothesis/strategies/_internal/regex.py
python
CharactersBuilder.add_char
(self, char)
Add given char to the whitelist.
Add given char to the whitelist.
[ "Add", "given", "char", "to", "the", "whitelist", "." ]
def add_char(self, char): """Add given char to the whitelist.""" c = self.code_to_char(char) self._whitelist_chars.add(c) if ( self._ignorecase and re.match(re.escape(c), c.swapcase(), flags=re.IGNORECASE) is not None ): # Note that it is possible that `len(c.swapcase()) > 1` self._whitelist_chars.add(c.swapcase())
[ "def", "add_char", "(", "self", ",", "char", ")", ":", "c", "=", "self", ".", "code_to_char", "(", "char", ")", "self", ".", "_whitelist_chars", ".", "add", "(", "c", ")", "if", "(", "self", ".", "_ignorecase", "and", "re", ".", "match", "(", "re", ".", "escape", "(", "c", ")", ",", "c", ".", "swapcase", "(", ")", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "is", "not", "None", ")", ":", "# Note that it is possible that `len(c.swapcase()) > 1`", "self", ".", "_whitelist_chars", ".", "add", "(", "c", ".", "swapcase", "(", ")", ")" ]
https://github.com/HypothesisWorks/hypothesis/blob/d1bfc4acc86899caa7a40f892322e1a69fbf36f4/hypothesis-python/src/hypothesis/strategies/_internal/regex.py#L161-L170
deepgully/me
f7ad65edc2fe435310c6676bc2e322cfe5d4c8f0
libs/sqlalchemy/dialects/mysql/oursql.py
python
MySQLDialect_oursql.do_execute
(self, cursor, statement, parameters, context=None)
Provide an implementation of *cursor.execute(statement, parameters)*.
Provide an implementation of *cursor.execute(statement, parameters)*.
[ "Provide", "an", "implementation", "of", "*", "cursor", ".", "execute", "(", "statement", "parameters", ")", "*", "." ]
def do_execute(self, cursor, statement, parameters, context=None): """Provide an implementation of *cursor.execute(statement, parameters)*.""" if context and context.plain_query: cursor.execute(statement, plain_query=True) else: cursor.execute(statement, parameters)
[ "def", "do_execute", "(", "self", ",", "cursor", ",", "statement", ",", "parameters", ",", "context", "=", "None", ")", ":", "if", "context", "and", "context", ".", "plain_query", ":", "cursor", ".", "execute", "(", "statement", ",", "plain_query", "=", "True", ")", "else", ":", "cursor", ".", "execute", "(", "statement", ",", "parameters", ")" ]
https://github.com/deepgully/me/blob/f7ad65edc2fe435310c6676bc2e322cfe5d4c8f0/libs/sqlalchemy/dialects/mysql/oursql.py#L81-L87
pyproj4/pyproj
24eade78c52f8bf6717e56fb7c878f7da9892368
pyproj/crs/crs.py
python
CRS.target_crs
(self)
return ( None if self._crs.target_crs is None else self.__class__(self._crs.target_crs) )
.. versionadded:: 2.2.0 Returns ------- CRS: The hub CRS of a BoundCRS or the target CRS of a CoordinateOperation.
.. versionadded:: 2.2.0
[ "..", "versionadded", "::", "2", ".", "2", ".", "0" ]
def target_crs(self) -> Optional["CRS"]: """ .. versionadded:: 2.2.0 Returns ------- CRS: The hub CRS of a BoundCRS or the target CRS of a CoordinateOperation. """ return ( None if self._crs.target_crs is None else self.__class__(self._crs.target_crs) )
[ "def", "target_crs", "(", "self", ")", "->", "Optional", "[", "\"CRS\"", "]", ":", "return", "(", "None", "if", "self", ".", "_crs", ".", "target_crs", "is", "None", "else", "self", ".", "__class__", "(", "self", ".", "_crs", ".", "target_crs", ")", ")" ]
https://github.com/pyproj4/pyproj/blob/24eade78c52f8bf6717e56fb7c878f7da9892368/pyproj/crs/crs.py#L998-L1012
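Example (a hedged sketch, not part of the dataset: `target_crs` is only populated for a BoundCRS or a coordinate operation, so a plain geographic CRS returns None; the `+towgs84` PROJ string below is assumed to produce a BoundCRS whose hub is WGS 84)::

    from pyproj import CRS

    plain = CRS.from_epsg(4326)
    print(plain.target_crs)   # None: a plain geographic CRS has no hub

    bound = CRS.from_proj4('+proj=longlat +ellps=GRS80 +towgs84=0,0,0')
    print(bound.target_crs)   # the WGS 84 hub CRS of the BoundCRS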