Dataset schema (column name, type, value-length range or class count):

    nwo                 string, length 5-106
    sha                 string, length 40
    path                string, length 4-174
    language            string, 1 class (python)
    identifier          string, length 1-140
    parameters          string, length 0-87.7k
    argument_list       string, 1 class
    return_statement    string, length 0-426k
    docstring           string, length 0-64.3k
    docstring_summary   string, length 0-26.3k
    docstring_tokens    list
    function            string, length 18-4.83M
    function_tokens     list
    url                 string, length 83-304
OUCMachineLearning/OUCML
5b54337d7c0316084cb1a74befda2bba96137d4a
One_Day_One_GAN/day1/gan/gan.py
python
GAN.__init__
(self)
def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise as input and generates imgs
    z = Input(shape=(self.latent_dim,))
    img = self.generator(z)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines validity
    validity = self.discriminator(img)

    # The combined model (stacked generator and discriminator)
    # Trains the generator to fool the discriminator
    self.combined = Model(z, validity)
    self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "img_rows", "=", "28", "self", ".", "img_cols", "=", "28", "self", ".", "channels", "=", "1", "self", ".", "img_shape", "=", "(", "self", ".", "img_rows", ",", "self", ".", "img_cols", ",", "self", ".", "channels", ")", "self", ".", "latent_dim", "=", "100", "optimizer", "=", "Adam", "(", "0.0002", ",", "0.5", ")", "# Build and compile the discriminator", "self", ".", "discriminator", "=", "self", ".", "build_discriminator", "(", ")", "self", ".", "discriminator", ".", "compile", "(", "loss", "=", "'binary_crossentropy'", ",", "optimizer", "=", "optimizer", ",", "metrics", "=", "[", "'accuracy'", "]", ")", "# Build the generator", "self", ".", "generator", "=", "self", ".", "build_generator", "(", ")", "# The generator takes noise as input and generates imgs", "z", "=", "Input", "(", "shape", "=", "(", "self", ".", "latent_dim", ",", ")", ")", "img", "=", "self", ".", "generator", "(", "z", ")", "# For the combined model we will only train the generator", "self", ".", "discriminator", ".", "trainable", "=", "False", "# The discriminator takes generated images as input and determines validity", "validity", "=", "self", ".", "discriminator", "(", "img", ")", "# The combined model (stacked generator and discriminator)", "# Trains the generator to fool the discriminator", "self", ".", "combined", "=", "Model", "(", "z", ",", "validity", ")", "self", ".", "combined", ".", "compile", "(", "loss", "=", "'binary_crossentropy'", ",", "optimizer", "=", "optimizer", ")" ]
https://github.com/OUCMachineLearning/OUCML/blob/5b54337d7c0316084cb1a74befda2bba96137d4a/One_Day_One_GAN/day1/gan/gan.py#L19-L50
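A minimal sketch of how the `combined` model wired up in this `__init__` is typically driven during training. It assumes the rest of the Keras-GAN class (`build_generator`, `build_discriminator`) exists as in the repo; `gan = GAN()` is a hypothetical instantiation:

    import numpy as np

    gan = GAN()
    batch_size = 32
    noise = np.random.normal(0, 1, (batch_size, gan.latent_dim))
    valid = np.ones((batch_size, 1))  # labels that say "real"
    # The discriminator is frozen inside `combined`, so this step only
    # updates the generator, training it to fool the discriminator.
    g_loss = gan.combined.train_on_batch(noise, valid)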
pik-copan/pyunicorn
b18316fc08ef34b434a1a4d69dfe3e57e24435ee
pyunicorn/timeseries/recurrence_plot.py
python
RecurrencePlot.legendre_coordinates
(x, dim=3, t=None, p=None, tau_w="est")
return y
Return a phase space trajectory reconstructed using orthogonal polynomial filters. The reconstructed state vector components are the zero-th to (dim-1)-th derivatives of the (possibly irregularly spaced) time series x as estimated by folding with the orthogonal polynomial filters that correspond to the sequence of measurement time points t. This is a generalization for irregularly spaced time series of the "Legendre coordinates" introduced in Gibson et al. (1992). :arg array-like x: Time series values :arg int dim: Dimension > 0 of reconstructed phase space. Default: 3 :type t: array-like or None :arg t: Optional array of measurement time points corresponding to the values in x. Default: [0,...,x.size-1] :type p: int > 0 or None :arg p: No. of past and future time points to use for the estimation. Default: dim or determined by tau_w if given :type tau_w: float > 0 or "est" or None :arg tau_w: Optional (average) window width to use in determining p when p = None. Following Gibson et al. (1992), this should be about sqrt(3/< x**2 >) * std(x), or about a quarter period. If "est", this is estimated iteratively, starting with 4 * (max(t)-min(t)) / (N-1) and estimating x' from that. :rtype: 2D array [observation index, dimension index] :return: Estimated derivatives. Rows are reconstructed state vectors.
Return a phase space trajectory reconstructed using orthogonal polynomial filters.
[ "Return", "a", "phase", "space", "trajectory", "reconstructed", "using", "orthogonal", "polynomial", "filters", "." ]
def legendre_coordinates(x, dim=3, t=None, p=None, tau_w="est"):
    """
    Return a phase space trajectory reconstructed using orthogonal
    polynomial filters.

    The reconstructed state vector components are the zero-th to (dim-1)-th
    derivatives of the (possibly irregularly spaced) time series x as
    estimated by folding with the orthogonal polynomial filters that
    correspond to the sequence of measurement time points t.

    This is a generalization for irregularly spaced time series of the
    "Legendre coordinates" introduced in Gibson et al. (1992).

    :arg array-like x: Time series values
    :arg int dim: Dimension > 0 of reconstructed phase space. Default: 3
    :type t: array-like or None
    :arg t: Optional array of measurement time points corresponding to the
        values in x. Default: [0,...,x.size-1]
    :type p: int > 0 or None
    :arg p: No. of past and future time points to use for the estimation.
        Default: dim or determined by tau_w if given
    :type tau_w: float > 0 or "est" or None
    :arg tau_w: Optional (average) window width to use in determining p when
        p = None. Following Gibson et al. (1992), this should be about
        sqrt(3/< x**2 >) * std(x), or about a quarter period. If "est", this
        is estimated iteratively, starting with 4 * (max(t)-min(t)) / (N-1)
        and estimating x' from that.
    :rtype: 2D array [observation index, dimension index]
    :return: Estimated derivatives. Rows are reconstructed state vectors.
    """
    x = np.array(x).flatten()
    N = x.size

    # time points:
    if t is None:
        t = np.arange(N)

    if p is None:
        if tau_w == "est":
            tau_w = 4 * (t.max() - t.min()) / (N-1)
            for i in range(5):
                y0 = RecurrencePlot.legendre_coordinates(x, dim=2, t=t,
                                                         tau_w=tau_w)
                tau_w = np.sqrt(3*x.var()/(y0[:, 1]**2).mean())
                print("tau_w set to", tau_w)
        if tau_w is None:
            p = dim
        else:
            p = 1
            while (t[2*p+1:] - t[:-(2*p+1)]).mean() < tau_w and p < N/4:
                p += 1
            print("p set to", p)

    m = 2*p + 1
    N1 = N - m + 1

    # time differences:
    dt = np.zeros((N1, m))
    for i in range(N1):
        dt[i, :] = t[i:i+m] - t[i+p]

    # filter weights
    # = recursively computed values of orthogonal polynomials:
    r = np.zeros((N1, dim, m))
    for j in range(dim):
        r[:, j, :] = dt**j - (
            r[:, :j, :] * ((dt**j).reshape((N1, 1, m)) * r[:, :j, :]).sum(
                axis=2).reshape((N1, j, 1))).sum(axis=1)
        r[:, j, :] /= np.sqrt((r[:, j, :]**2).sum(axis=1)).reshape((N1, 1))
    for j in range(dim):
        r[:, j, :] *= factorial(j) / \
            (r[:, j, :] * dt**j).sum(axis=1).reshape((-1, 1))

    # reconstructed state vectors = filtered time series values:
    y = np.zeros((N1, dim))
    for i in range(N1):
        y[i, :] = (r[i, :, :]*x[i:i+m].reshape((1, m))).sum(axis=1)

    return y
[ "def", "legendre_coordinates", "(", "x", ",", "dim", "=", "3", ",", "t", "=", "None", ",", "p", "=", "None", ",", "tau_w", "=", "\"est\"", ")", ":", "x", "=", "np", ".", "array", "(", "x", ")", ".", "flatten", "(", ")", "N", "=", "x", ".", "size", "# time points:", "if", "t", "is", "None", ":", "t", "=", "np", ".", "arange", "(", "N", ")", "if", "p", "is", "None", ":", "if", "tau_w", "==", "\"est\"", ":", "tau_w", "=", "4", "*", "(", "t", ".", "max", "(", ")", "-", "t", ".", "min", "(", ")", ")", "/", "(", "N", "-", "1", ")", "for", "i", "in", "range", "(", "5", ")", ":", "y0", "=", "RecurrencePlot", ".", "legendre_coordinates", "(", "x", ",", "dim", "=", "2", ",", "t", "=", "t", ",", "tau_w", "=", "tau_w", ")", "tau_w", "=", "np", ".", "sqrt", "(", "3", "*", "x", ".", "var", "(", ")", "/", "(", "y0", "[", ":", ",", "1", "]", "**", "2", ")", ".", "mean", "(", ")", ")", "print", "(", "\"tau_w set to\"", ",", "tau_w", ")", "if", "tau_w", "is", "None", ":", "p", "=", "dim", "else", ":", "p", "=", "1", "while", "(", "t", "[", "2", "*", "p", "+", "1", ":", "]", "-", "t", "[", ":", "-", "(", "2", "*", "p", "+", "1", ")", "]", ")", ".", "mean", "(", ")", "<", "tau_w", "and", "p", "<", "N", "/", "4", ":", "p", "+=", "1", "print", "(", "\"p set to\"", ",", "p", ")", "m", "=", "2", "*", "p", "+", "1", "N1", "=", "N", "-", "m", "+", "1", "# time differences:", "dt", "=", "np", ".", "zeros", "(", "(", "N1", ",", "m", ")", ")", "for", "i", "in", "range", "(", "N1", ")", ":", "dt", "[", "i", ",", ":", "]", "=", "t", "[", "i", ":", "i", "+", "m", "]", "-", "t", "[", "i", "+", "p", "]", "# filter weights", "# = recursively computed values of orthogonal polynomials:", "r", "=", "np", ".", "zeros", "(", "(", "N1", ",", "dim", ",", "m", ")", ")", "for", "j", "in", "range", "(", "dim", ")", ":", "r", "[", ":", ",", "j", ",", ":", "]", "=", "dt", "**", "j", "-", "(", "r", "[", ":", ",", ":", "j", ",", ":", "]", "*", "(", "(", "dt", "**", "j", ")", ".", "reshape", "(", "(", "N1", ",", "1", ",", "m", ")", ")", "*", "r", "[", ":", ",", ":", "j", ",", ":", "]", ")", ".", "sum", "(", "axis", "=", "2", ")", ".", "reshape", "(", "(", "N1", ",", "j", ",", "1", ")", ")", ")", ".", "sum", "(", "axis", "=", "1", ")", "r", "[", ":", ",", "j", ",", ":", "]", "/=", "np", ".", "sqrt", "(", "(", "r", "[", ":", ",", "j", ",", ":", "]", "**", "2", ")", ".", "sum", "(", "axis", "=", "1", ")", ")", ".", "reshape", "(", "(", "N1", ",", "1", ")", ")", "for", "j", "in", "range", "(", "dim", ")", ":", "r", "[", ":", ",", "j", ",", ":", "]", "*=", "factorial", "(", "j", ")", "/", "(", "r", "[", ":", ",", "j", ",", ":", "]", "*", "dt", "**", "j", ")", ".", "sum", "(", "axis", "=", "1", ")", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "# reconstructed state vectors = filtered time series values:", "y", "=", "np", ".", "zeros", "(", "(", "N1", ",", "dim", ")", ")", "for", "i", "in", "range", "(", "N1", ")", ":", "y", "[", "i", ",", ":", "]", "=", "(", "r", "[", "i", ",", ":", ",", ":", "]", "*", "x", "[", "i", ":", "i", "+", "m", "]", ".", "reshape", "(", "(", "1", ",", "m", ")", ")", ")", ".", "sum", "(", "axis", "=", "1", ")", "return", "y" ]
https://github.com/pik-copan/pyunicorn/blob/b18316fc08ef34b434a1a4d69dfe3e57e24435ee/pyunicorn/timeseries/recurrence_plot.py#L334-L412
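A hedged usage sketch of the function above: reconstructing derivative estimates of a sine wave. It assumes pyunicorn is importable and fixes `p` to skip the iterative `tau_w` estimation:

    import numpy as np
    from pyunicorn.timeseries import RecurrencePlot

    t = np.linspace(0, 10 * np.pi, 500)
    x = np.sin(t)
    # Rows of y are reconstructed state vectors: estimates of (x, x', x'').
    y = RecurrencePlot.legendre_coordinates(x, dim=3, t=t, p=5)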
PaddlePaddle/ERNIE
15eddb022ce1beb281777e9ab8807a1bdfa7a76e
propeller/service/utils.py
python
nparray_list_deserialize
(string)
return [slot_to_numpy(slot) for slot in slots.slots]
doc
doc
[ "doc" ]
def nparray_list_deserialize(string):
    """doc"""
    slots = interface_pb2.Slots()
    slots.ParseFromString(string)
    return [slot_to_numpy(slot) for slot in slots.slots]
[ "def", "nparray_list_deserialize", "(", "string", ")", ":", "slots", "=", "interface_pb2", ".", "Slots", "(", ")", "slots", ".", "ParseFromString", "(", "string", ")", "return", "[", "slot_to_numpy", "(", "slot", ")", "for", "slot", "in", "slots", ".", "slots", "]" ]
https://github.com/PaddlePaddle/ERNIE/blob/15eddb022ce1beb281777e9ab8807a1bdfa7a76e/propeller/service/utils.py#L113-L117
biolab/orange2
db40a9449cb45b507d63dcd5739b223f9cffb8e6
Orange/OrangeWidgets/Data/OWOutliers.py
python
OWOutliers.cdistance
(self, distances)
handles distance matrix input signal
handles distance matrix input signal
[ "handles", "distance", "matrix", "input", "signal" ]
def cdistance(self, distances):
    """handles distance matrix input signal"""
    self.distanceMatrix = distances
    self.dataChange()
[ "def", "cdistance", "(", "self", ",", "distances", ")", ":", "self", ".", "distanceMatrix", "=", "distances", "self", ".", "dataChange", "(", ")" ]
https://github.com/biolab/orange2/blob/db40a9449cb45b507d63dcd5739b223f9cffb8e6/Orange/OrangeWidgets/Data/OWOutliers.py#L128-L131
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/distribute-0.6.34-py2.7.egg/pkg_resources.py
python
Environment.__iadd__
(self, other)
return self
In-place addition of a distribution or environment
In-place addition of a distribution or environment
[ "In", "-", "place", "addition", "of", "a", "distribution", "or", "environment" ]
def __iadd__(self, other):
    """In-place addition of a distribution or environment"""
    if isinstance(other, Distribution):
        self.add(other)
    elif isinstance(other, Environment):
        for project in other:
            for dist in other[project]:
                self.add(dist)
    else:
        raise TypeError("Can't add %r to environment" % (other,))
    return self
[ "def", "__iadd__", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "Distribution", ")", ":", "self", ".", "add", "(", "other", ")", "elif", "isinstance", "(", "other", ",", "Environment", ")", ":", "for", "project", "in", "other", ":", "for", "dist", "in", "other", "[", "project", "]", ":", "self", ".", "add", "(", "dist", ")", "else", ":", "raise", "TypeError", "(", "\"Can't add %r to environment\"", "%", "(", "other", ",", ")", ")", "return", "self" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/distribute-0.6.34-py2.7.egg/pkg_resources.py#L853-L863
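Because `__iadd__` accepts either a `Distribution` or another `Environment`, both of the following forms route through the method above (a small sketch using pkg_resources' public constructors):

    from pkg_resources import Environment, Distribution

    env = Environment([])
    env += Distribution(project_name="example", version="1.0")  # one distribution
    env += Environment([])                                      # merge an environment
    # Any other operand raises TypeError, per the final branch above.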
pikpikcu/Pentest-Tools-Framework
cd6e6107764a809943dc4e073cde8149c1a2cd03
modules/xsser/core/gtkcontroller.py
python
Controller.on_automatic_payload_toggled
(self, widget)
Syn. automatic_payload mode with other automatic switches
Syn. automatic_payload mode with other automatic switches
[ "Syn", ".", "automatic_payload", "mode", "with", "other", "automatic", "switches" ]
def on_automatic_payload_toggled(self, widget):
    """
    Syn. automatic_payload mode with other automatic switches
    """
    automatic = self.wTree.get_object('automatic')
    automatic_payload = self.wTree.get_object('automatic_payload')
    if automatic_payload.get_property('active') == True:
        automatic.set_property('active', True)
    else:
        automatic.set_property('active', False)
[ "def", "on_automatic_payload_toggled", "(", "self", ",", "widget", ")", ":", "automatic", "=", "self", ".", "wTree", ".", "get_object", "(", "'automatic'", ")", "automatic_payload", "=", "self", ".", "wTree", ".", "get_object", "(", "'automatic_payload'", ")", "if", "automatic_payload", ".", "get_property", "(", "'active'", ")", "==", "True", ":", "automatic", ".", "set_property", "(", "'active'", ",", "True", ")", "else", ":", "automatic", ".", "set_property", "(", "'active'", ",", "False", ")" ]
https://github.com/pikpikcu/Pentest-Tools-Framework/blob/cd6e6107764a809943dc4e073cde8149c1a2cd03/modules/xsser/core/gtkcontroller.py#L587-L596
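Since the handler just mirrors one boolean property onto another, the if/else collapses to a single call; an equivalent sketch (same GTK calls, behavior unchanged):

    automatic.set_property('active', automatic_payload.get_property('active'))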
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/integrals/transforms.py
python
_laplace_rule_trig
(f, t, s, doit=True, **hints)
return None
This internal helper function tries to transform a product containing a trigonometric function (`sin`, `cos`, `sinh`, `cosh`, ) and returns `None` if it cannot do it.
This internal helper function tries to transform a product containing a trigonometric function (`sin`, `cos`, `sinh`, `cosh`, ) and returns `None` if it cannot do it.
[ "This", "internal", "helper", "function", "tries", "to", "transform", "a", "product", "containing", "a", "trigonometric", "function", "(", "sin", "cos", "sinh", "cosh", ")", "and", "returns", "None", "if", "it", "cannot", "do", "it", "." ]
def _laplace_rule_trig(f, t, s, doit=True, **hints):
    """
    This internal helper function tries to transform a product containing a
    trigonometric function (`sin`, `cos`, `sinh`, `cosh`, ) and returns
    `None` if it cannot do it.
    """
    _simplify = hints.pop('simplify', True)
    a = Wild('a', exclude=[t])
    y = Wild('y')
    z = Wild('z')
    k, func = f.as_independent(t, as_Add=False)
    # All of the rules have a very similar form: trig(y)*z is matched, and then
    # two copies of the Laplace transform of z are shifted in the s Domain
    # and added with a weight; see rules 1.6 to 1.9 in
    # http://eqworld.ipmnet.ru/en/auxiliary/inttrans/laplace1.pdf
    # The parameters in the tuples are (fm, nu, s1, s2, sd):
    #   fm: Function to match
    #   nu: Number of the rule, for debug purposes
    #   s1: weight of the sum, 'I' for sin and '1' for all others
    #   s2: sign of the second copy of the Laplace transform of z
    #   sd: shift direction; shift along real or imaginary axis if `1` or `I`
    trigrules = [(sinh(y), '1.6', 1, -1, 1), (cosh(y), '1.7', 1, 1, 1),
                 (sin(y), '1.8', -I, -1, I), (cos(y), '1.9', 1, 1, I)]
    for trigrule in trigrules:
        fm, nu, s1, s2, sd = trigrule
        ma1 = func.match(fm*z)
        if ma1:
            ma2 = ma1[y].collect(t).match(a*t)
            if ma2:
                debug('_laplace_apply_rules match:')
                debug('      f:    %s ( %s, %s )' % (f, ma1, ma2))
                debug('      rule: multiply with %s (%s)' % (fm.func, nu))
                L = _laplace_apply_rules(ma1[z], t, s, doit=doit, **hints)
                try:
                    r, p, c = L
                    # The convergence plane changes only if the shift has been
                    # done along the real axis:
                    if sd == 1:
                        cp_shift = Abs(ma2[a])
                    else:
                        cp_shift = 0
                    return ((s1*(r.subs(s, s-sd*ma2[a]) +
                                 s2*r.subs(s, s+sd*ma2[a]))).simplify()/2,
                            p+cp_shift, c)
                except TypeError:
                    if doit == True and _simplify == True:
                        return (s1*(L.subs(s, s-sd*ma2[a]) +
                                    s2*L.subs(s, s+sd*ma2[a]))).simplify()/2
                    else:
                        return (s1*(L.subs(s, s-sd*ma2[a]) +
                                    s2*L.subs(s, s+sd*ma2[a])))/2
    return None
[ "def", "_laplace_rule_trig", "(", "f", ",", "t", ",", "s", ",", "doit", "=", "True", ",", "*", "*", "hints", ")", ":", "_simplify", "=", "hints", ".", "pop", "(", "'simplify'", ",", "True", ")", "a", "=", "Wild", "(", "'a'", ",", "exclude", "=", "[", "t", "]", ")", "y", "=", "Wild", "(", "'y'", ")", "z", "=", "Wild", "(", "'z'", ")", "k", ",", "func", "=", "f", ".", "as_independent", "(", "t", ",", "as_Add", "=", "False", ")", "# All of the rules have a very similar form: trig(y)*z is matched, and then", "# two copies of the Laplace transform of z are shifted in the s Domain", "# and added with a weight; see rules 1.6 to 1.9 in", "# http://eqworld.ipmnet.ru/en/auxiliary/inttrans/laplace1.pdf", "# The parameters in the tuples are (fm, nu, s1, s2, sd):", "# fm: Function to match", "# nu: Number of the rule, for debug purposes", "# s1: weight of the sum, 'I' for sin and '1' for all others", "# s2: sign of the second copy of the Laplace transform of z", "# sd: shift direction; shift along real or imaginary axis if `1` or `I`", "trigrules", "=", "[", "(", "sinh", "(", "y", ")", ",", "'1.6'", ",", "1", ",", "-", "1", ",", "1", ")", ",", "(", "cosh", "(", "y", ")", ",", "'1.7'", ",", "1", ",", "1", ",", "1", ")", ",", "(", "sin", "(", "y", ")", ",", "'1.8'", ",", "-", "I", ",", "-", "1", ",", "I", ")", ",", "(", "cos", "(", "y", ")", ",", "'1.9'", ",", "1", ",", "1", ",", "I", ")", "]", "for", "trigrule", "in", "trigrules", ":", "fm", ",", "nu", ",", "s1", ",", "s2", ",", "sd", "=", "trigrule", "ma1", "=", "func", ".", "match", "(", "fm", "*", "z", ")", "if", "ma1", ":", "ma2", "=", "ma1", "[", "y", "]", ".", "collect", "(", "t", ")", ".", "match", "(", "a", "*", "t", ")", "if", "ma2", ":", "debug", "(", "'_laplace_apply_rules match:'", ")", "debug", "(", "' f: %s ( %s, %s )'", "%", "(", "f", ",", "ma1", ",", "ma2", ")", ")", "debug", "(", "' rule: multiply with %s (%s)'", "%", "(", "fm", ".", "func", ",", "nu", ")", ")", "L", "=", "_laplace_apply_rules", "(", "ma1", "[", "z", "]", ",", "t", ",", "s", ",", "doit", "=", "doit", ",", "*", "*", "hints", ")", "try", ":", "r", ",", "p", ",", "c", "=", "L", "# The convergence plane changes only if the shift has been", "# done along the real axis:", "if", "sd", "==", "1", ":", "cp_shift", "=", "Abs", "(", "ma2", "[", "a", "]", ")", "else", ":", "cp_shift", "=", "0", "return", "(", "(", "s1", "*", "(", "r", ".", "subs", "(", "s", ",", "s", "-", "sd", "*", "ma2", "[", "a", "]", ")", "+", "s2", "*", "r", ".", "subs", "(", "s", ",", "s", "+", "sd", "*", "ma2", "[", "a", "]", ")", ")", ")", ".", "simplify", "(", ")", "/", "2", ",", "p", "+", "cp_shift", ",", "c", ")", "except", "TypeError", ":", "if", "doit", "==", "True", "and", "_simplify", "==", "True", ":", "return", "(", "s1", "*", "(", "L", ".", "subs", "(", "s", ",", "s", "-", "sd", "*", "ma2", "[", "a", "]", ")", "+", "s2", "*", "L", ".", "subs", "(", "s", ",", "s", "+", "sd", "*", "ma2", "[", "a", "]", ")", ")", ")", ".", "simplify", "(", ")", "/", "2", "else", ":", "return", "(", "s1", "*", "(", "L", ".", "subs", "(", "s", ",", "s", "-", "sd", "*", "ma2", "[", "a", "]", ")", "+", "s2", "*", "L", ".", "subs", "(", "s", ",", "s", "+", "sd", "*", "ma2", "[", "a", "]", ")", ")", ")", "/", "2", "return", "None" ]
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/integrals/transforms.py#L1661-L1712
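For reference, rules 1.6-1.9 encoded in `trigrules` are the standard s-domain shift identities, with F(s) the Laplace transform of z(t):

    \mathcal{L}\{\sinh(at)\,z(t)\} = \tfrac{1}{2}\bigl(F(s-a) - F(s+a)\bigr)
    \mathcal{L}\{\cosh(at)\,z(t)\} = \tfrac{1}{2}\bigl(F(s-a) + F(s+a)\bigr)
    \mathcal{L}\{\sin(at)\,z(t)\}  = -\tfrac{i}{2}\bigl(F(s-ia) - F(s+ia)\bigr)
    \mathcal{L}\{\cos(at)\,z(t)\}  = \tfrac{1}{2}\bigl(F(s-ia) + F(s+ia)\bigr)

The tuples (s1, s2, sd) supply exactly the overall weight, the sign of the second copy, and the shift direction in these four formulas.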
galaxyproject/galaxy
4c03520f05062e0f4a1b3655dc0b7452fda69943
lib/galaxy/jobs/runners/__init__.py
python
AsynchronousJobRunner.finish_job
(self, job_state)
Get the output/error for a finished job, pass to `job_wrapper.finish` and cleanup all the job's temporary files.
Get the output/error for a finished job, pass to `job_wrapper.finish` and cleanup all the job's temporary files.
[ "Get", "the", "output", "/", "error", "for", "a", "finished", "job", "pass", "to", "job_wrapper", ".", "finish", "and", "cleanup", "all", "the", "job", "s", "temporary", "files", "." ]
def finish_job(self, job_state):
    """
    Get the output/error for a finished job, pass to `job_wrapper.finish`
    and cleanup all the job's temporary files.
    """
    galaxy_id_tag = job_state.job_wrapper.get_id_tag()
    external_job_id = job_state.job_id

    # To ensure that files below are readable, ownership must be reclaimed first
    job_state.job_wrapper.reclaim_ownership()

    # wait for the files to appear
    which_try = 0
    collect_output_success = True
    while which_try < self.app.config.retry_job_output_collection + 1:
        try:
            with open(job_state.output_file, "rb") as stdout_file, open(job_state.error_file, 'rb') as stderr_file:
                stdout = self._job_io_for_db(stdout_file)
                stderr = self._job_io_for_db(stderr_file)
            break
        except Exception as e:
            if which_try == self.app.config.retry_job_output_collection:
                stdout = ''
                stderr = job_state.runner_states.JOB_OUTPUT_NOT_RETURNED_FROM_CLUSTER
                log.error('(%s/%s) %s: %s', galaxy_id_tag, external_job_id, stderr, unicodify(e))
                collect_output_success = False
            else:
                time.sleep(1)
            which_try += 1

    if not collect_output_success:
        job_state.fail_message = stderr
        job_state.runner_state = job_state.runner_states.JOB_OUTPUT_NOT_RETURNED_FROM_CLUSTER
        self.mark_as_failed(job_state)
        return
    self._finish_or_resubmit_job(job_state, stdout, stderr, job_id=galaxy_id_tag, external_job_id=external_job_id)
[ "def", "finish_job", "(", "self", ",", "job_state", ")", ":", "galaxy_id_tag", "=", "job_state", ".", "job_wrapper", ".", "get_id_tag", "(", ")", "external_job_id", "=", "job_state", ".", "job_id", "# To ensure that files below are readable, ownership must be reclaimed first", "job_state", ".", "job_wrapper", ".", "reclaim_ownership", "(", ")", "# wait for the files to appear", "which_try", "=", "0", "collect_output_success", "=", "True", "while", "which_try", "<", "self", ".", "app", ".", "config", ".", "retry_job_output_collection", "+", "1", ":", "try", ":", "with", "open", "(", "job_state", ".", "output_file", ",", "\"rb\"", ")", "as", "stdout_file", ",", "open", "(", "job_state", ".", "error_file", ",", "'rb'", ")", "as", "stderr_file", ":", "stdout", "=", "self", ".", "_job_io_for_db", "(", "stdout_file", ")", "stderr", "=", "self", ".", "_job_io_for_db", "(", "stderr_file", ")", "break", "except", "Exception", "as", "e", ":", "if", "which_try", "==", "self", ".", "app", ".", "config", ".", "retry_job_output_collection", ":", "stdout", "=", "''", "stderr", "=", "job_state", ".", "runner_states", ".", "JOB_OUTPUT_NOT_RETURNED_FROM_CLUSTER", "log", ".", "error", "(", "'(%s/%s) %s: %s'", ",", "galaxy_id_tag", ",", "external_job_id", ",", "stderr", ",", "unicodify", "(", "e", ")", ")", "collect_output_success", "=", "False", "else", ":", "time", ".", "sleep", "(", "1", ")", "which_try", "+=", "1", "if", "not", "collect_output_success", ":", "job_state", ".", "fail_message", "=", "stderr", "job_state", ".", "runner_state", "=", "job_state", ".", "runner_states", ".", "JOB_OUTPUT_NOT_RETURNED_FROM_CLUSTER", "self", ".", "mark_as_failed", "(", "job_state", ")", "return", "self", ".", "_finish_or_resubmit_job", "(", "job_state", ",", "stdout", ",", "stderr", ",", "job_id", "=", "galaxy_id_tag", ",", "external_job_id", "=", "external_job_id", ")" ]
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/jobs/runners/__init__.py#L748-L784
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
source/addons/payment/models/payment_acquirer.py
python
PaymentTransaction.s2s_send
(self, cr, uid, values, cc_values, context=None)
return (tx_id, result)
Create and send server-to-server transaction. :param dict values: transaction values :param dict cc_values: credit card values that are not stored into the payment.transaction object. Acquirers should handle receiving void or incorrect cc values. Should contain : - holder_name - number - cvc - expiry_date - brand - expiry_date_yy - expiry_date_mm
Create and send server-to-server transaction.
[ "Create", "and", "send", "server", "-", "to", "-", "server", "transaction", "." ]
def s2s_send(self, cr, uid, values, cc_values, context=None):
    """ Create and send server-to-server transaction.

    :param dict values: transaction values
    :param dict cc_values: credit card values that are not stored into the
                           payment.transaction object. Acquirers should
                           handle receiving void or incorrect cc values.
                           Should contain :

                            - holder_name
                            - number
                            - cvc
                            - expiry_date
                            - brand
                            - expiry_date_yy
                            - expiry_date_mm
    """
    tx_id, result = None, None

    if values.get('acquirer_id'):
        acquirer = self.pool['payment.acquirer'].browse(cr, uid, values.get('acquirer_id'), context=context)
        custom_method_name = '_%s_s2s_send' % acquirer.provider
        if hasattr(self, custom_method_name):
            tx_id, result = getattr(self, custom_method_name)(cr, uid, values, cc_values, context=context)

    if tx_id is None and result is None:
        tx_id = super(PaymentTransaction, self).create(cr, uid, values, context=context)

    return (tx_id, result)
[ "def", "s2s_send", "(", "self", ",", "cr", ",", "uid", ",", "values", ",", "cc_values", ",", "context", "=", "None", ")", ":", "tx_id", ",", "result", "=", "None", ",", "None", "if", "values", ".", "get", "(", "'acquirer_id'", ")", ":", "acquirer", "=", "self", ".", "pool", "[", "'payment.acquirer'", "]", ".", "browse", "(", "cr", ",", "uid", ",", "values", ".", "get", "(", "'acquirer_id'", ")", ",", "context", "=", "context", ")", "custom_method_name", "=", "'_%s_s2s_send'", "%", "acquirer", ".", "provider", "if", "hasattr", "(", "self", ",", "custom_method_name", ")", ":", "tx_id", ",", "result", "=", "getattr", "(", "self", ",", "custom_method_name", ")", "(", "cr", ",", "uid", ",", "values", ",", "cc_values", ",", "context", "=", "context", ")", "if", "tx_id", "is", "None", "and", "result", "is", "None", ":", "tx_id", "=", "super", "(", "PaymentTransaction", ",", "self", ")", ".", "create", "(", "cr", ",", "uid", ",", "values", ",", "context", "=", "context", ")", "return", "(", "tx_id", ",", "result", ")" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/source/addons/payment/models/payment_acquirer.py#L503-L530
Endogen/Telegram-Kraken-Bot
6f799a366a64ddb86b13b742a5c826feeeae3cce
telegram_kraken_bot.py
python
trade_vol_volume
(bot, update, chat_data)
return WorkflowEnum.TRADE_VOLUME
def trade_vol_volume(bot, update, chat_data):
    chat_data["vol_type"] = update.message.text.upper()

    reply_msg = "Enter volume"

    cancel_btn = build_menu([KeyboardButton(KeyboardEnum.CANCEL.clean())])
    reply_mrk = ReplyKeyboardMarkup(cancel_btn, resize_keyboard=True)
    update.message.reply_text(reply_msg, reply_markup=reply_mrk)

    return WorkflowEnum.TRADE_VOLUME
[ "def", "trade_vol_volume", "(", "bot", ",", "update", ",", "chat_data", ")", ":", "chat_data", "[", "\"vol_type\"", "]", "=", "update", ".", "message", ".", "text", ".", "upper", "(", ")", "reply_msg", "=", "\"Enter volume\"", "cancel_btn", "=", "build_menu", "(", "[", "KeyboardButton", "(", "KeyboardEnum", ".", "CANCEL", ".", "clean", "(", ")", ")", "]", ")", "reply_mrk", "=", "ReplyKeyboardMarkup", "(", "cancel_btn", ",", "resize_keyboard", "=", "True", ")", "update", ".", "message", ".", "reply_text", "(", "reply_msg", ",", "reply_markup", "=", "reply_mrk", ")", "return", "WorkflowEnum", ".", "TRADE_VOLUME" ]
https://github.com/Endogen/Telegram-Kraken-Bot/blob/6f799a366a64ddb86b13b742a5c826feeeae3cce/telegram_kraken_bot.py#L535-L544
facebookresearch/hydra
9b2f4d54b328d1551aa70a241a1d638cbe046367
hydra/_internal/config_loader_impl.py
python
ConfigLoaderImpl.compute_defaults_list
( self, config_name: Optional[str], overrides: List[str], run_mode: RunMode, )
return defaults_list
def compute_defaults_list(
    self,
    config_name: Optional[str],
    overrides: List[str],
    run_mode: RunMode,
) -> DefaultsList:
    parser = OverridesParser.create()
    parsed_overrides = parser.parse_overrides(overrides=overrides)
    repo = CachingConfigRepository(self.repository)
    self._process_config_searchpath(config_name, parsed_overrides, repo)
    defaults_list = create_defaults_list(
        repo=repo,
        config_name=config_name,
        overrides_list=parsed_overrides,
        prepend_hydra=True,
        skip_missing=run_mode == RunMode.MULTIRUN,
    )
    return defaults_list
[ "def", "compute_defaults_list", "(", "self", ",", "config_name", ":", "Optional", "[", "str", "]", ",", "overrides", ":", "List", "[", "str", "]", ",", "run_mode", ":", "RunMode", ",", ")", "->", "DefaultsList", ":", "parser", "=", "OverridesParser", ".", "create", "(", ")", "parsed_overrides", "=", "parser", ".", "parse_overrides", "(", "overrides", "=", "overrides", ")", "repo", "=", "CachingConfigRepository", "(", "self", ".", "repository", ")", "self", ".", "_process_config_searchpath", "(", "config_name", ",", "parsed_overrides", ",", "repo", ")", "defaults_list", "=", "create_defaults_list", "(", "repo", "=", "repo", ",", "config_name", "=", "config_name", ",", "overrides_list", "=", "parsed_overrides", ",", "prepend_hydra", "=", "True", ",", "skip_missing", "=", "run_mode", "==", "RunMode", ".", "MULTIRUN", ",", ")", "return", "defaults_list" ]
https://github.com/facebookresearch/hydra/blob/9b2f4d54b328d1551aa70a241a1d638cbe046367/hydra/_internal/config_loader_impl.py#L544-L561
HKUST-KnowComp/ASER
3290e7cad0ec271c3720f4d578cae2c8d17915a4
aser/database/db_connection.py
python
MongoDBConnection.close
(self)
Close the connection safely
Close the connection safely
[ "Close", "the", "connection", "safely" ]
def close(self):
    """ Close the connection safely """
    self._client.close()
[ "def", "close", "(", "self", ")", ":", "self", ".", "_client", ".", "close", "(", ")" ]
https://github.com/HKUST-KnowComp/ASER/blob/3290e7cad0ec271c3720f4d578cae2c8d17915a4/aser/database/db_connection.py#L465-L468
CIRCL/AIL-framework
9c561d482705095f734d4d87fce6b6ab203d7c90
bin/modules/Global.py
python
Global.check_filename
(self, filename, new_file_content)
return filename
Check if file is not a duplicated file return the filename if new file, else None
Check if file is not a duplicated file return the filename if new file, else None
[ "Check", "if", "file", "is", "not", "a", "duplicated", "file", "return", "the", "filename", "if", "new", "file", "else", "None" ]
def check_filename(self, filename, new_file_content):
    """
    Check if file is not a duplicated file
    return the filename if new file, else None
    """
    # check if file exist
    if os.path.isfile(filename):
        self.redis_logger.warning(f'File already exist {filename}')
        print(f'File already exist {filename}')

        # Check that file already exists but content differs
        curr_file_content = self.gunzip_file(filename)
        if curr_file_content:
            # Compare file content with message content with MD5 checksums
            curr_file_md5 = md5(curr_file_content).hexdigest()
            new_file_md5 = md5(new_file_content).hexdigest()
            if new_file_md5 != curr_file_md5:
                # MD5 are not equals, verify filename
                if filename.endswith('.gz'):
                    filename = f'{filename[:-3]}_{new_file_md5}.gz'
                else:
                    filename = f'{filename}_{new_file_md5}'
                self.redis_logger.debug(f'new file to check: {filename}')
                if os.path.isfile(filename):
                    # Ignore duplicate
                    self.redis_logger.debug(f'ignore duplicated file {filename}')
                    print(f'ignore duplicated file {filename}')
                    filename = None
            else:
                # Ignore duplicate checksum equals
                self.redis_logger.debug(f'ignore duplicated file {filename}')
                print(f'ignore duplicated file {filename}')
                filename = None
        else:
            # File not unzipped
            filename = None

    return filename
[ "def", "check_filename", "(", "self", ",", "filename", ",", "new_file_content", ")", ":", "# check if file exist", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "self", ".", "redis_logger", ".", "warning", "(", "f'File already exist {filename}'", ")", "print", "(", "f'File already exist {filename}'", ")", "# Check that file already exists but content differs", "curr_file_content", "=", "self", ".", "gunzip_file", "(", "filename", ")", "if", "curr_file_content", ":", "# Compare file content with message content with MD5 checksums", "curr_file_md5", "=", "md5", "(", "curr_file_content", ")", ".", "hexdigest", "(", ")", "new_file_md5", "=", "md5", "(", "new_file_content", ")", ".", "hexdigest", "(", ")", "if", "new_file_md5", "!=", "curr_file_md5", ":", "# MD5 are not equals, verify filename", "if", "filename", ".", "endswith", "(", "'.gz'", ")", ":", "filename", "=", "f'{filename[:-3]}_{new_file_md5}.gz'", "else", ":", "filename", "=", "f'{filename}_{new_file_md5}'", "self", ".", "redis_logger", ".", "debug", "(", "f'new file to check: {filename}'", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "# Ignore duplicate", "self", ".", "redis_logger", ".", "debug", "(", "f'ignore duplicated file {filename}'", ")", "print", "(", "f'ignore duplicated file {filename}'", ")", "filename", "=", "None", "else", ":", "# Ignore duplicate checksum equals", "self", ".", "redis_logger", ".", "debug", "(", "f'ignore duplicated file {filename}'", ")", "print", "(", "f'ignore duplicated file {filename}'", ")", "filename", "=", "None", "else", ":", "# File not unzipped", "filename", "=", "None", "return", "filename" ]
https://github.com/CIRCL/AIL-framework/blob/9c561d482705095f734d4d87fce6b6ab203d7c90/bin/modules/Global.py#L141-L185
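The heart of the duplicate check above is a byte-level MD5 comparison; isolated as a standalone sketch (hypothetical helper, not part of the module's API):

    from hashlib import md5

    def same_content(stored: bytes, incoming: bytes) -> bool:
        # Two files count as duplicates when their MD5 digests match.
        return md5(stored).hexdigest() == md5(incoming).hexdigest()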
ibm-research-tokyo/dybm
a6d308c896c2f66680ee9c5d05a3d7826cc27c64
src/pydybm/base/sgd.py
python
ADAM._update_state
(self, gradients, params, func_gradients)
Virtual method updating internal state with current parameters and gradient function Parameters ---------- gradients : Dictionary[str, np.ndarray] Dictionary of gradients. params : Dictionary[str, np.ndarray] Dictionary of parameters. func_gradients : Callable[[Dictionary[str, np.ndarray]], Dictionary[str, np.ndarray]] Function that maps from parameter to gradients.
Virtual method updating internal state with current parameters and gradient function
[ "Virtual", "method", "updating", "internal", "state", "with", "current", "parameters", "and", "gradient", "function" ]
def _update_state(self, gradients, params, func_gradients):
    """ Virtual method updating internal state with current parameters
    and gradient function

    Parameters
    ----------
    gradients : Dictionary[str, np.ndarray]
        Dictionary of gradients.
    params : Dictionary[str, np.ndarray]
        Dictionary of parameters.
    func_gradients : Callable[[Dictionary[str, np.ndarray]],
                              Dictionary[str, np.ndarray]]
        Function that maps from parameter to gradients.
    """
    g = gradients
    for key in g:
        self.first[key], self.second[key] \
            = amath.op.adam_update_state(self.first[key], self.second[key],
                                         self.beta, self.gamma, g[key])
    self.step += 1
[ "def", "_update_state", "(", "self", ",", "gradients", ",", "params", ",", "func_gradients", ")", ":", "g", "=", "gradients", "for", "key", "in", "g", ":", "self", ".", "first", "[", "key", "]", ",", "self", ".", "second", "[", "key", "]", "=", "amath", ".", "op", ".", "adam_update_state", "(", "self", ".", "first", "[", "key", "]", ",", "self", ".", "second", "[", "key", "]", ",", "self", ".", "beta", ",", "self", ".", "gamma", ",", "g", "[", "key", "]", ")", "self", ".", "step", "+=", "1" ]
https://github.com/ibm-research-tokyo/dybm/blob/a6d308c896c2f66680ee9c5d05a3d7826cc27c64/src/pydybm/base/sgd.py#L661-L682
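Judging from the attribute names (`first`, `second`, `beta`, `gamma`), `amath.op.adam_update_state` presumably applies the usual Adam moment updates per parameter key; in standard notation (an assumption, since the backend is not shown here):

    m_t = \beta\, m_{t-1} + (1 - \beta)\, g_t
    v_t = \gamma\, v_{t-1} + (1 - \gamma)\, g_t^2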
Yuliang-Liu/Box_Discretization_Network
5b3a30c97429ef8e5c5e1c4e2476c7d9abdc03e6
maskrcnn_benchmark/utils/miscellaneous.py
python
mkdir
(path)
def mkdir(path):
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
[ "def", "mkdir", "(", "path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise" ]
https://github.com/Yuliang-Liu/Box_Discretization_Network/blob/5b3a30c97429ef8e5c5e1c4e2476c7d9abdc03e6/maskrcnn_benchmark/utils/miscellaneous.py#L6-L11
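On Python 3.2+, the same race-tolerant behavior is available directly from the standard library (a drop-in alternative, not what this repo uses):

    import os

    def mkdir(path):
        # exist_ok=True suppresses only the "directory already exists" error,
        # matching the errno.EEXIST check above; other OSErrors still propagate.
        os.makedirs(path, exist_ok=True)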
hakril/PythonForWindows
61e027a678d5b87aa64fcf8a37a6661a86236589
windows/winobject/task_scheduler.py
python
Task.__repr__
(self)
return """<{0} "{1}" at {2:#x}>""".format(type(self).__name__, self.name, id(self))
def __repr__(self):
    return """<{0} "{1}" at {2:#x}>""".format(type(self).__name__, self.name, id(self))
[ "def", "__repr__", "(", "self", ")", ":", "return", "\"\"\"<{0} \"{1}\" at {2:#x}>\"\"\"", ".", "format", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "name", ",", "id", "(", "self", ")", ")" ]
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/windows/winobject/task_scheduler.py#L313-L314
modin-project/modin
0d9d14e6669be3dd6bb3b72222dbe6a6dffe1bee
modin/core/dataframe/pandas/dataframe/dataframe.py
python
PandasDataframe.concat
(self, axis, others, how, sort)
return self.__constructor__( new_partitions, new_index, new_columns, new_lengths, new_widths, new_dtypes )
Concatenate `self` with one or more other Modin DataFrames. Parameters ---------- axis : {0, 1} Axis to concatenate over. others : list List of Modin DataFrames to concatenate with. how : str Type of join to use for the axis. sort : bool Whether sort the result or not. Returns ------- PandasDataframe New Modin DataFrame.
Concatenate `self` with one or more other Modin DataFrames.
[ "Concatenate", "self", "with", "one", "or", "more", "other", "Modin", "DataFrames", "." ]
def concat(self, axis, others, how, sort):
    """
    Concatenate `self` with one or more other Modin DataFrames.

    Parameters
    ----------
    axis : {0, 1}
        Axis to concatenate over.
    others : list
        List of Modin DataFrames to concatenate with.
    how : str
        Type of join to use for the axis.
    sort : bool
        Whether sort the result or not.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    # Fast path for equivalent columns and partitioning
    if (
        axis == 0
        and all(o.columns.equals(self.columns) for o in others)
        and all(o._column_widths == self._column_widths for o in others)
    ):
        joined_index = self.columns
        left_parts = self._partitions
        right_parts = [o._partitions for o in others]
        new_lengths = self._row_lengths + [
            length for o in others for length in o._row_lengths
        ]
        new_widths = self._column_widths
    elif (
        axis == 1
        and all(o.index.equals(self.index) for o in others)
        and all(o._row_lengths == self._row_lengths for o in others)
    ):
        joined_index = self.index
        left_parts = self._partitions
        right_parts = [o._partitions for o in others]
        new_lengths = self._row_lengths
        new_widths = self._column_widths + [
            length for o in others for length in o._column_widths
        ]
    else:
        left_parts, right_parts, joined_index = self._copartition(
            axis ^ 1, others, how, sort, force_repartition=False
        )
        new_lengths = None
        new_widths = None
    new_partitions = self._partition_mgr_cls.concat(axis, left_parts, right_parts)
    if axis == 0:
        new_index = self.index.append([other.index for other in others])
        new_columns = joined_index
        # TODO: Can optimize by combining if all dtypes are materialized
        new_dtypes = None
    else:
        new_columns = self.columns.append([other.columns for other in others])
        new_index = joined_index
        if self._dtypes is not None and all(o._dtypes is not None for o in others):
            new_dtypes = self.dtypes.append([o.dtypes for o in others])
        else:
            new_dtypes = None
    return self.__constructor__(
        new_partitions, new_index, new_columns, new_lengths, new_widths, new_dtypes
    )
[ "def", "concat", "(", "self", ",", "axis", ",", "others", ",", "how", ",", "sort", ")", ":", "# Fast path for equivalent columns and partitioning", "if", "(", "axis", "==", "0", "and", "all", "(", "o", ".", "columns", ".", "equals", "(", "self", ".", "columns", ")", "for", "o", "in", "others", ")", "and", "all", "(", "o", ".", "_column_widths", "==", "self", ".", "_column_widths", "for", "o", "in", "others", ")", ")", ":", "joined_index", "=", "self", ".", "columns", "left_parts", "=", "self", ".", "_partitions", "right_parts", "=", "[", "o", ".", "_partitions", "for", "o", "in", "others", "]", "new_lengths", "=", "self", ".", "_row_lengths", "+", "[", "length", "for", "o", "in", "others", "for", "length", "in", "o", ".", "_row_lengths", "]", "new_widths", "=", "self", ".", "_column_widths", "elif", "(", "axis", "==", "1", "and", "all", "(", "o", ".", "index", ".", "equals", "(", "self", ".", "index", ")", "for", "o", "in", "others", ")", "and", "all", "(", "o", ".", "_row_lengths", "==", "self", ".", "_row_lengths", "for", "o", "in", "others", ")", ")", ":", "joined_index", "=", "self", ".", "index", "left_parts", "=", "self", ".", "_partitions", "right_parts", "=", "[", "o", ".", "_partitions", "for", "o", "in", "others", "]", "new_lengths", "=", "self", ".", "_row_lengths", "new_widths", "=", "self", ".", "_column_widths", "+", "[", "length", "for", "o", "in", "others", "for", "length", "in", "o", ".", "_column_widths", "]", "else", ":", "left_parts", ",", "right_parts", ",", "joined_index", "=", "self", ".", "_copartition", "(", "axis", "^", "1", ",", "others", ",", "how", ",", "sort", ",", "force_repartition", "=", "False", ")", "new_lengths", "=", "None", "new_widths", "=", "None", "new_partitions", "=", "self", ".", "_partition_mgr_cls", ".", "concat", "(", "axis", ",", "left_parts", ",", "right_parts", ")", "if", "axis", "==", "0", ":", "new_index", "=", "self", ".", "index", ".", "append", "(", "[", "other", ".", "index", "for", "other", "in", "others", "]", ")", "new_columns", "=", "joined_index", "# TODO: Can optimize by combining if all dtypes are materialized", "new_dtypes", "=", "None", "else", ":", "new_columns", "=", "self", ".", "columns", ".", "append", "(", "[", "other", ".", "columns", "for", "other", "in", "others", "]", ")", "new_index", "=", "joined_index", "if", "self", ".", "_dtypes", "is", "not", "None", "and", "all", "(", "o", ".", "_dtypes", "is", "not", "None", "for", "o", "in", "others", ")", ":", "new_dtypes", "=", "self", ".", "dtypes", ".", "append", "(", "[", "o", ".", "dtypes", "for", "o", "in", "others", "]", ")", "else", ":", "new_dtypes", "=", "None", "return", "self", ".", "__constructor__", "(", "new_partitions", ",", "new_index", ",", "new_columns", ",", "new_lengths", ",", "new_widths", ",", "new_dtypes", ")" ]
https://github.com/modin-project/modin/blob/0d9d14e6669be3dd6bb3b72222dbe6a6dffe1bee/modin/core/dataframe/pandas/dataframe/dataframe.py#L2039-L2105
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/plat-freebsd8/IN.py
python
__STRING
(x)
return "x"
def __STRING(x): return "x"
[ "def", "__STRING", "(", "x", ")", ":", "return", "\"x\"" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/plat-freebsd8/IN.py#L34-L34
junyanz/BicycleGAN
40b9d52c27b9831f56c1c7c7a6ddde8bc9149067
models/pix2pix_model.py
python
Pix2PixModel.backward_G
(self)
Calculate GAN and L1 loss for the generator
Calculate GAN and L1 loss for the generator
[ "Calculate", "GAN", "and", "L1", "loss", "for", "the", "generator" ]
def backward_G(self):
    """Calculate GAN and L1 loss for the generator"""
    # First, G(A) should fake the discriminator
    fake_AB = torch.cat((self.real_A, self.fake_B), 1)
    pred_fake = self.netD(fake_AB)
    self.loss_G_GAN, _ = self.criterionGAN(pred_fake, True)
    # Second, G(A) = B
    self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
    # combine loss and calculate gradients
    self.loss_G = self.loss_G_GAN + self.loss_G_L1
    self.loss_G.backward()
[ "def", "backward_G", "(", "self", ")", ":", "# First, G(A) should fake the discriminator", "fake_AB", "=", "torch", ".", "cat", "(", "(", "self", ".", "real_A", ",", "self", ".", "fake_B", ")", ",", "1", ")", "pred_fake", "=", "self", ".", "netD", "(", "fake_AB", ")", "self", ".", "loss_G_GAN", ",", "_", "=", "self", ".", "criterionGAN", "(", "pred_fake", ",", "True", ")", "# Second, G(A) = B", "self", ".", "loss_G_L1", "=", "self", ".", "criterionL1", "(", "self", ".", "fake_B", ",", "self", ".", "real_B", ")", "*", "self", ".", "opt", ".", "lambda_L1", "# combine loss and calculate gradients", "self", ".", "loss_G", "=", "self", ".", "loss_G_GAN", "+", "self", ".", "loss_G_L1", "self", ".", "loss_G", ".", "backward", "(", ")" ]
https://github.com/junyanz/BicycleGAN/blob/40b9d52c27b9831f56c1c7c7a6ddde8bc9149067/models/pix2pix_model.py#L105-L115
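In formula form, the objective combined above is the standard pix2pix generator loss:

    \mathcal{L}_G = \mathcal{L}_{\mathrm{GAN}}\bigl(D(A, G(A)), 1\bigr) + \lambda_{L1}\,\lVert G(A) - B \rVert_1

where the first term asks the discriminator to label the fake pair as real and the second pulls G(A) toward the ground truth B.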
heldersepu/GMapCatcher
0fcd85742d54449d679acf52cc019e93fdc402fe
gmapcatcher/pyGPSD/nmea/tcpport.py
python
TcpPort.__open
(self)
Open the nmea port
Open the nmea port
[ "Open", "the", "nmea", "port" ]
def __open(self):
    """ Open the nmea port """
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.connect(self.address)
    self.sock.settimeout(self.timeout)
[ "def", "__open", "(", "self", ")", ":", "self", ".", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "self", ".", "sock", ".", "connect", "(", "self", ".", "address", ")", "self", ".", "sock", ".", "settimeout", "(", "self", ".", "timeout", ")" ]
https://github.com/heldersepu/GMapCatcher/blob/0fcd85742d54449d679acf52cc019e93fdc402fe/gmapcatcher/pyGPSD/nmea/tcpport.py#L34-L38
omergertel/pyformance
b71056eaf9af6cafd3e3c4a416412ae425bdc82e
pyformance/reporters/__init__.py
python
CsvReporter
(*args, **kwargs)
return cls(*args, **kwargs)
def CsvReporter(*args, **kwargs):
    from .csv_reporter import CsvReporter as cls
    return cls(*args, **kwargs)
[ "def", "CsvReporter", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", ".", "csv_reporter", "import", "CsvReporter", "as", "cls", "return", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/omergertel/pyformance/blob/b71056eaf9af6cafd3e3c4a416412ae425bdc82e/pyformance/reporters/__init__.py#L30-L33
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/pyu2f-0.1.4/pyu2f/hid/windows.py
python
WindowsHidDevice.GetOutReportDataLength
(self)
return self.desc.internal_max_out_report_len - 1
See base class.
See base class.
[ "See", "base", "class", "." ]
def GetOutReportDataLength(self):
    """See base class."""
    return self.desc.internal_max_out_report_len - 1
[ "def", "GetOutReportDataLength", "(", "self", ")", ":", "return", "self", ".", "desc", ".", "internal_max_out_report_len", "-", "1" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/pyu2f-0.1.4/pyu2f/hid/windows.py#L330-L332
ethz-asl/hfnet
01577845583373470a0cf156a2acd972e9223bc7
hfnet/models/lfnet_utils/det_tools.py
python
get_visibility_mask
(coords)
return visible_mask
Get visible region mask Args: coords: [batch, height, width, 2] Return: visible mask [batch, height, width, 1]
Get visible region mask Args: coords: [batch, height, width, 2] Return: visible mask [batch, height, width, 1]
[ "Get", "visible", "region", "mask", "Args", ":", "coords", ":", "[", "batch", "height", "width", "2", "]", "Return", ":", "visible", "mask", "[", "batch", "height", "width", "1", "]" ]
def get_visibility_mask(coords):
    """ Get visible region mask
        Args:
            coords: [batch, height, width, 2]
        Return:
            visible mask [batch, height, width, 1]
    """
    coords_x, coords_y = tf.split(coords, [1, 1], axis=3)
    coords_x = tf.cast(coords_x, 'float32')
    coords_y = tf.cast(coords_y, 'float32')

    x0 = tf.cast(tf.floor(coords_x), tf.int32)
    x1 = x0 + 1
    y0 = tf.cast(tf.floor(coords_y), tf.int32)
    y1 = y0 + 1

    height = tf.shape(coords)[1]
    width = tf.shape(coords)[2]
    zero = tf.zeros([1], dtype=tf.int32)

    inside_x = tf.logical_and(tf.greater_equal(x0, zero), tf.less(x1, width))
    inside_y = tf.logical_and(tf.greater_equal(y0, zero), tf.less(y1, height))
    visible_mask = tf.cast(tf.logical_and(inside_x, inside_y), tf.float32)

    return visible_mask
[ "def", "get_visibility_mask", "(", "coords", ")", ":", "coords_x", ",", "coords_y", "=", "tf", ".", "split", "(", "coords", ",", "[", "1", ",", "1", "]", ",", "axis", "=", "3", ")", "coords_x", "=", "tf", ".", "cast", "(", "coords_x", ",", "'float32'", ")", "coords_y", "=", "tf", ".", "cast", "(", "coords_y", ",", "'float32'", ")", "x0", "=", "tf", ".", "cast", "(", "tf", ".", "floor", "(", "coords_x", ")", ",", "tf", ".", "int32", ")", "x1", "=", "x0", "+", "1", "y0", "=", "tf", ".", "cast", "(", "tf", ".", "floor", "(", "coords_y", ")", ",", "tf", ".", "int32", ")", "y1", "=", "y0", "+", "1", "height", "=", "tf", ".", "shape", "(", "coords", ")", "[", "1", "]", "width", "=", "tf", ".", "shape", "(", "coords", ")", "[", "2", "]", "zero", "=", "tf", ".", "zeros", "(", "[", "1", "]", ",", "dtype", "=", "tf", ".", "int32", ")", "inside_x", "=", "tf", ".", "logical_and", "(", "tf", ".", "greater_equal", "(", "x0", ",", "zero", ")", ",", "tf", ".", "less", "(", "x1", ",", "width", ")", ")", "inside_y", "=", "tf", ".", "logical_and", "(", "tf", ".", "greater_equal", "(", "y0", ",", "zero", ")", ",", "tf", ".", "less", "(", "y1", ",", "height", ")", ")", "visible_mask", "=", "tf", ".", "cast", "(", "tf", ".", "logical_and", "(", "inside_x", ",", "inside_y", ")", ",", "tf", ".", "float32", ")", "return", "visible_mask" ]
https://github.com/ethz-asl/hfnet/blob/01577845583373470a0cf156a2acd972e9223bc7/hfnet/models/lfnet_utils/det_tools.py#L1064-L1089
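A hedged usage sketch for the mask above (TensorFlow assumed importable; shapes follow the docstring):

    import tensorflow as tf

    # Random sampling coordinates, some of which fall outside a 64x64 image.
    coords = tf.random.uniform([2, 64, 64, 2], minval=-8.0, maxval=72.0)
    mask = get_visibility_mask(coords)  # [2, 64, 64, 1]; 1.0 where both neighbors are in-bounds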
scragg0x/realms-wiki
ed8c8c374e5ad1850f839547ad541dacaa4b90a3
realms/modules/wiki/models.py
python
Wiki.commit
(self, name, email, message, files)
return self.repo.do_commit(message=message, committer=committer, author=author)
Commit to the underlying git repo. :param name: Committer name :param email: Committer email :param message: Commit message :param files: list of file names that will be staged for commit :return:
Commit to the underlying git repo.
[ "Commit", "to", "the", "underlying", "git", "repo", "." ]
def commit(self, name, email, message, files):
    """Commit to the underlying git repo.

    :param name: Committer name
    :param email: Committer email
    :param message: Commit message
    :param files: list of file names that will be staged for commit
    :return:
    """
    if isinstance(name, text_type):
        name = name.encode('utf-8')
    if isinstance(email, text_type):
        email = email.encode('utf-8')
    if isinstance(message, text_type):
        message = message.encode('utf-8')
    author = committer = "{0} <{1}>".format(name, email).encode()
    self.repo.stage(files)
    return self.repo.do_commit(message=message,
                               committer=committer,
                               author=author)
[ "def", "commit", "(", "self", ",", "name", ",", "email", ",", "message", ",", "files", ")", ":", "if", "isinstance", "(", "name", ",", "text_type", ")", ":", "name", "=", "name", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "email", ",", "text_type", ")", ":", "email", "=", "email", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "message", ",", "text_type", ")", ":", "message", "=", "message", ".", "encode", "(", "'utf-8'", ")", "author", "=", "committer", "=", "\"{0} <{1}>\"", ".", "format", "(", "name", ",", "email", ")", ".", "encode", "(", ")", "self", ".", "repo", ".", "stage", "(", "files", ")", "return", "self", ".", "repo", ".", "do_commit", "(", "message", "=", "message", ",", "committer", "=", "committer", ",", "author", "=", "author", ")" ]
https://github.com/scragg0x/realms-wiki/blob/ed8c8c374e5ad1850f839547ad541dacaa4b90a3/realms/modules/wiki/models.py#L43-L62
splunk/splunk-sdk-python
ef88e9d3e90ab9d6cf48cf940c7376400ed759b8
splunklib/client.py
python
ReadOnlyCollection.__iter__
(self, **kwargs)
Iterate over the entities in the collection. :param kwargs: Additional arguments. :type kwargs: ``dict`` :rtype: iterator over entities. Implemented to give Collection a listish interface. This function always makes a roundtrip to the server, plus at most two additional round trips if the ``autologin`` field of :func:`connect` is set to ``True``. **Example**:: import splunklib.client as client c = client.connect(...) saved_searches = c.saved_searches for entity in saved_searches: print "Saved search named %s" % entity.name
Iterate over the entities in the collection.
[ "Iterate", "over", "the", "entities", "in", "the", "collection", "." ]
def __iter__(self, **kwargs):
    """Iterate over the entities in the collection.

    :param kwargs: Additional arguments.
    :type kwargs: ``dict``
    :rtype: iterator over entities.

    Implemented to give Collection a listish interface. This function
    always makes a roundtrip to the server, plus at most two additional
    round trips if the ``autologin`` field of :func:`connect` is set to
    ``True``.

    **Example**::

        import splunklib.client as client
        c = client.connect(...)
        saved_searches = c.saved_searches
        for entity in saved_searches:
            print "Saved search named %s" % entity.name
    """
    for item in self.iter(**kwargs):
        yield item
[ "def", "__iter__", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "item", "in", "self", ".", "iter", "(", "*", "*", "kwargs", ")", ":", "yield", "item" ]
https://github.com/splunk/splunk-sdk-python/blob/ef88e9d3e90ab9d6cf48cf940c7376400ed759b8/splunklib/client.py#L1279-L1301
locationtech-labs/geopyspark
97bcb17a56ed4b4059e2f0dbab97706562cac692
geopyspark/geotrellis/catalog.py
python
AttributeStore.delete
(self, name, zoom=None)
Delete layer and all its attributes Args: name (str): Layer name zoom (int, optional): Layer zoom
Delete layer and all its attributes
[ "Delete", "layer", "and", "all", "its", "attributes" ]
def delete(self, name, zoom=None):
    """Delete layer and all its attributes

    Args:
        name (str): Layer name
        zoom (int, optional): Layer zoom
    """
    zoom = zoom or 0
    self.wrapper.delete(name, zoom)
[ "def", "delete", "(", "self", ",", "name", ",", "zoom", "=", "None", ")", ":", "zoom", "=", "zoom", "or", "0", "self", ".", "wrapper", ".", "delete", "(", "name", ",", "zoom", ")" ]
https://github.com/locationtech-labs/geopyspark/blob/97bcb17a56ed4b4059e2f0dbab97706562cac692/geopyspark/geotrellis/catalog.py#L464-L472
Maicius/QQZoneMood
e529f386865bed141f43c5825a3f1dc7cfa161b9
src/spider/QQZoneSpider.py
python
QQZoneSpider.get_main_page_info
(self)
Get main page info
Get main page info
[ "获取主页信息" ]
def get_main_page_info(self):
    """Get main page info"""
    url, url2 = self.get_main_page_url()
    # self.headers['host'] = 'user.qzone.qq.com'
    try:
        res = self.req.get(url=url, headers=self.headers)
        if self.debug:
            print("Main page info status:", res.status_code)
        content = json.loads(self.get_json(res.content.decode("utf-8")))
        data = content['data']['module_16']['data']
        self.user_info.mood_num = data['SS']
        self.user_info.photo_num = data['XC']
        self.user_info.rz_num = data['RZ']
        self.mood_num = self.user_info.mood_num if self.mood_num == -1 else self.mood_num
        if self.use_redis:
            self.re.set(MOOD_NUM_KEY + self.username, self.mood_num)
            self.re.rpush(WEB_SPIDER_INFO + self.username, "Fetched main page info successfully")
            self.re.rpush(WEB_SPIDER_INFO + self.username, MOOD_NUM_PRE + ":" + str(self.mood_num))
            if not self.no_delete:
                self.re.expire(MOOD_NUM_KEY + self.username, EXPIRE_TIME_IN_SECONDS)
        if self.debug:
            print(self.user_info.mood_num)
            print("Finish to get main page info")
    except BaseException as e:
        self.format_error(e, "Failed to get main page info")
        if self.use_redis:
            self.re.rpush(WEB_SPIDER_INFO + self.username, GET_MAIN_PAGE_FAILED)
[ "def", "get_main_page_info", "(", "self", ")", ":", "url", ",", "url2", "=", "self", ".", "get_main_page_url", "(", ")", "# self.headers['host'] = 'user.qzone.qq.com'", "try", ":", "res", "=", "self", ".", "req", ".", "get", "(", "url", "=", "url", ",", "headers", "=", "self", ".", "headers", ")", "if", "self", ".", "debug", ":", "print", "(", "\"主页信息状态:\", res.status", "_", "ode", ")", "", "", "content", "=", "json", ".", "loads", "(", "self", ".", "get_json", "(", "res", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ")", "data", "=", "content", "[", "'data'", "]", "[", "'module_16'", "]", "[", "'data'", "]", "self", ".", "user_info", ".", "mood_num", "=", "data", "[", "'SS'", "]", "self", ".", "user_info", ".", "photo_num", "=", "data", "[", "'XC'", "]", "self", ".", "user_info", ".", "rz_num", "=", "data", "[", "'RZ'", "]", "self", ".", "mood_num", "=", "self", ".", "user_info", ".", "mood_num", "if", "self", ".", "mood_num", "==", "-", "1", "else", "self", ".", "mood_num", "if", "self", ".", "use_redis", ":", "self", ".", "re", ".", "set", "(", "MOOD_NUM_KEY", "+", "self", ".", "username", ",", "self", ".", "mood_num", ")", "self", ".", "re", ".", "rpush", "(", "WEB_SPIDER_INFO", "+", "self", ".", "username", ",", "\"获取主页信息成功\")", "", "self", ".", "re", ".", "rpush", "(", "WEB_SPIDER_INFO", "+", "self", ".", "username", ",", "MOOD_NUM_PRE", "+", "\":\"", "+", "str", "(", "self", ".", "mood_num", ")", ")", "if", "not", "self", ".", "no_delete", ":", "self", ".", "re", ".", "expire", "(", "MOOD_NUM_KEY", "+", "self", ".", "username", ",", "EXPIRE_TIME_IN_SECONDS", ")", "if", "self", ".", "debug", ":", "print", "(", "self", ".", "user_info", ".", "mood_num", ")", "print", "(", "\"Finish to get main page info\"", ")", "except", "BaseException", "as", "e", ":", "self", ".", "format_error", "(", "e", ",", "\"Failed to get main page info\"", ")", "if", "self", ".", "use_redis", ":", "self", ".", "re", ".", "rpush", "(", "WEB_SPIDER_INFO", "+", "self", ".", "username", ",", "GET_MAIN_PAGE_FAILED", ")" ]
https://github.com/Maicius/QQZoneMood/blob/e529f386865bed141f43c5825a3f1dc7cfa161b9/src/spider/QQZoneSpider.py#L906-L934
automl/RoBO
91366b12a1a3deb8e80dd08599e0eaf4df28adc1
robo/acquisition_functions/information_gain.py
python
InformationGain.compute
(self, X_test, derivative=False, **kwargs)
Computes the information gain of X and its derivatives Parameters ---------- X_test: np.ndarray(N, D), The input point where the acquisition_functions function should be evaluated. derivative: Boolean If set to true, the derivative of the acquisition_functions function at X is also returned. Not tested! Returns ------- np.ndarray(N,) Relative change of entropy of pmin np.ndarray(N,D) Derivatives with respect to X (only if derivative=True)
Computes the information gain of X and its derivatives
[ "Computes", "the", "information", "gain", "of", "X", "and", "its", "derivatives" ]
def compute(self, X_test, derivative=False, **kwargs): """ Computes the information gain of X and its derivatives Parameters ---------- X_test: np.ndarray(N, D), The input point where the acquisition_functions function should be evaluated. derivative: Boolean If set to true, the derivative of the acquisition_functions function at X is also returned. Not tested! Returns ------- np.ndarray(N,) Relative change of entropy of pmin np.ndarray(N,D) Derivatives with respect to X (only if derivative=True) """ acq = np.zeros([X_test.shape[0]]) grad = np.zeros([X_test.shape[0], X_test.shape[1]]) for i, X in enumerate(X_test): if derivative: acq[i], grad[i] = self.dh_fun(X[None, :], derivative=True) else: acq[i] = self.dh_fun(X[None, :], derivative=False) if np.any(np.isnan(acq[i])) or np.any(acq[i] == np.inf): acq[i] = -sys.float_info.max if derivative: return acq, grad else: return acq
[ "def", "compute", "(", "self", ",", "X_test", ",", "derivative", "=", "False", ",", "*", "*", "kwargs", ")", ":", "acq", "=", "np", ".", "zeros", "(", "[", "X_test", ".", "shape", "[", "0", "]", "]", ")", "grad", "=", "np", ".", "zeros", "(", "[", "X_test", ".", "shape", "[", "0", "]", ",", "X_test", ".", "shape", "[", "1", "]", "]", ")", "for", "i", ",", "X", "in", "enumerate", "(", "X_test", ")", ":", "if", "derivative", ":", "acq", "[", "i", "]", ",", "grad", "[", "i", "]", "=", "self", ".", "dh_fun", "(", "X", "[", "None", ",", ":", "]", ",", "derivative", "=", "True", ")", "else", ":", "acq", "[", "i", "]", "=", "self", ".", "dh_fun", "(", "X", "[", "None", ",", ":", "]", ",", "derivative", "=", "False", ")", "if", "np", ".", "any", "(", "np", ".", "isnan", "(", "acq", "[", "i", "]", ")", ")", "or", "np", ".", "any", "(", "acq", "[", "i", "]", "==", "np", ".", "inf", ")", ":", "acq", "[", "i", "]", "=", "-", "sys", ".", "float_info", ".", "max", "if", "derivative", ":", "return", "acq", ",", "grad", "else", ":", "return", "acq" ]
https://github.com/automl/RoBO/blob/91366b12a1a3deb8e80dd08599e0eaf4df28adc1/robo/acquisition_functions/information_gain.py#L87-L125
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/conversations/views.py
python
check_message_box_space
(redirect_to=None)
Checks if the message quota has been exceeded. If that's the case, it flashes a message and redirects back to some endpoint. :param redirect_to: The endpoint to redirect to. If set to ``None`` it will redirect to the ``conversations_bp.inbox`` endpoint.
Checks if the message quota has been exceeded. If that's the case, it flashes a message and redirects back to some endpoint.
[ "Checks", "if", "the", "message", "quota", "has", "been", "exceeded", ".", "If", "that's", "the", "case", "it", "flashes", "a", "message", "and", "redirects", "back", "to", "some", "endpoint", "." ]
def check_message_box_space(redirect_to=None): """Checks if the message quota has been exceeded. If that's the case, it flashes a message and redirects back to some endpoint. :param redirect_to: The endpoint to redirect to. If set to ``None`` it will redirect to the ``conversations_bp.inbox`` endpoint. """ if get_message_count(current_user) >= flaskbb_config["MESSAGE_QUOTA"]: flash( _( "You cannot send any messages anymore because you have " "reached your message limit." ), "danger" ) return redirect(redirect_to or url_for("conversations_bp.inbox"))
[ "def", "check_message_box_space", "(", "redirect_to", "=", "None", ")", ":", "if", "get_message_count", "(", "current_user", ")", ">=", "flaskbb_config", "[", "\"MESSAGE_QUOTA\"", "]", ":", "flash", "(", "_", "(", "\"You cannot send any messages anymore because you have \"", "\"reached your message limit.\"", ")", ",", "\"danger\"", ")", "return", "redirect", "(", "redirect_to", "or", "url_for", "(", "\"conversations_bp.inbox\"", ")", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/conversations/views.py#L38-L53
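A hypothetical call site for the helper above (the route and endpoint names are assumptions, not from the source): the helper only returns a redirect when the quota is hit, so a view can bail out early.

@conversations_bp.route("/compose")
def compose():
    # quota check first; a non-None result is the prepared flash + redirect
    response = check_message_box_space(url_for("conversations_bp.inbox"))
    if response is not None:
        return response
    ...  # otherwise continue rendering the compose form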
spulec/moto
a688c0032596a7dfef122b69a08f2bec3be2e481
moto/cloudwatch/models.py
python
CloudWatchBackend._list_element_starts_with
(items, needle)
return False
True if any of the list elements starts with needle
True if any of the list elements starts with needle
[ "True", "if", "any", "of", "the", "list", "elements", "starts", "with", "needle" ]
def _list_element_starts_with(items, needle): """True if any of the list elements starts with needle""" for item in items: if item.startswith(needle): return True return False
[ "def", "_list_element_starts_with", "(", "items", ",", "needle", ")", ":", "for", "item", "in", "items", ":", "if", "item", ".", "startswith", "(", "needle", ")", ":", "return", "True", "return", "False" ]
https://github.com/spulec/moto/blob/a688c0032596a7dfef122b69a08f2bec3be2e481/moto/cloudwatch/models.py#L413-L418
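The helper above is a hand-rolled any(); a minimal equivalent sketch (standalone, not part of moto itself):

def list_element_starts_with(items, needle):
    # True if any of the list elements starts with needle
    return any(item.startswith(needle) for item in items)

assert list_element_starts_with(["alarm:high", "alarm:low"], "alarm:") is True
assert list_element_starts_with(["alarm:high"], "metric:") is False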
theelous3/asks
5c58e2ad3ff8158bf6673475bfb6ee0f6817b2aa
asks/sessions.py
python
BaseSession.__init__
(self, headers=None, ssl_context=None)
Args: headers (dict): Headers to be applied to all requests. headers set by http method call will take precedence and overwrite headers set by the headers arg. ssl_context (ssl.SSLContext): SSL context to use for https connections.
Args: headers (dict): Headers to be applied to all requests. headers set by http method call will take precedence and overwrite headers set by the headers arg. ssl_context (ssl.SSLContext): SSL context to use for https connections.
[ "Args", ":", "headers", "(", "dict", ")", ":", "Headers", "to", "be", "applied", "to", "all", "requests", ".", "headers", "set", "by", "http", "method", "call", "will", "take", "precedence", "and", "overwrite", "headers", "set", "by", "the", "headers", "arg", ".", "ssl_context", "(", "ssl", ".", "SSLContext", ")", ":", "SSL", "context", "to", "use", "for", "https", "connections", "." ]
def __init__(self, headers=None, ssl_context=None): """ Args: headers (dict): Headers to be applied to all requests. headers set by http method call will take precedence and overwrite headers set by the headers arg. ssl_context (ssl.SSLContext): SSL context to use for https connections. """ if headers is not None: self.headers = headers else: self.headers = {} self.ssl_context = ssl_context self.encoding = None self.source_address = None self._cookie_tracker = None
[ "def", "__init__", "(", "self", ",", "headers", "=", "None", ",", "ssl_context", "=", "None", ")", ":", "if", "headers", "is", "not", "None", ":", "self", ".", "headers", "=", "headers", "else", ":", "self", ".", "headers", "=", "{", "}", "self", ".", "ssl_context", "=", "ssl_context", "self", ".", "encoding", "=", "None", "self", ".", "source_address", "=", "None", "self", ".", "_cookie_tracker", "=", "None" ]
https://github.com/theelous3/asks/blob/5c58e2ad3ff8158bf6673475bfb6ee0f6817b2aa/asks/sessions.py#L30-L46
googledatalab/pydatalab
1c86e26a0d24e3bc8097895ddeab4d0607be4c40
google/datalab/commands/_datalab.py
python
_project_get_fn
(args, cell)
return google.datalab.utils.commands.render_text(ctx.project_id)
[]
def _project_get_fn(args, cell): ctx = google.datalab.Context.default() return google.datalab.utils.commands.render_text(ctx.project_id)
[ "def", "_project_get_fn", "(", "args", ",", "cell", ")", ":", "ctx", "=", "google", ".", "datalab", ".", "Context", ".", "default", "(", ")", "return", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "render_text", "(", "ctx", ".", "project_id", ")" ]
https://github.com/googledatalab/pydatalab/blob/1c86e26a0d24e3bc8097895ddeab4d0607be4c40/google/datalab/commands/_datalab.py#L97-L99
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/src/lib/pvc.py
python
PersistentVolumeClaim.storage_class_name
(self, data)
storage_class_name property setter
storage_class_name property setter
[ "storage_class_name", "property", "setter" ]
def storage_class_name(self, data): ''' storage_class_name property setter''' self._storage_class_name = data
[ "def", "storage_class_name", "(", "self", ",", "data", ")", ":", "self", ".", "_storage_class_name", "=", "data" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/src/lib/pvc.py#L82-L84
kevoreilly/CAPEv2
6cf79c33264624b3604d4cd432cde2a6b4536de6
analyzer/linux/lib/common/abstracts.py
python
Package.execute
(self, cmd)
return p.pid
Start an executable for analysis. @param cmd: command to execute @return: process pid
Start an executable for analysis.
[ "Start", "an", "executable", "for", "analysis", "." ]
def execute(self, cmd): """Start an executable for analysis. @param cmd: command to execute @return: process pid """ p = Process() if not p.execute(cmd): raise CuckooPackageError("Unable to execute the initial process, analysis aborted") return p.pid
[ "def", "execute", "(", "self", ",", "cmd", ")", ":", "p", "=", "Process", "(", ")", "if", "not", "p", ".", "execute", "(", "cmd", ")", ":", "raise", "CuckooPackageError", "(", "\"Unable to execute the initial process, analysis aborted\"", ")", "return", "p", ".", "pid" ]
https://github.com/kevoreilly/CAPEv2/blob/6cf79c33264624b3604d4cd432cde2a6b4536de6/analyzer/linux/lib/common/abstracts.py#L37-L47
IntelLabs/coach
dea46ae0d22b0a0cd30b9fc138a4a2642e1b9d9d
rl_coach/architectures/tensorflow_components/savers.py
python
GlobalVariableSaver.path
(self)
return ""
Relative path for save/load. If two checkpoint objects return the same path, they must be merge-able.
Relative path for save/load. If two checkpoint objects return the same path, they must be merge-able.
[ "Relative", "path", "for", "save", "/", "load", ".", "If", "two", "checkpoint", "objects", "return", "the", "same", "path", "they", "must", "be", "merge", "-", "able", "." ]
def path(self): """ Relative path for save/load. If two checkpoint objects return the same path, they must be merge-able. """ return ""
[ "def", "path", "(", "self", ")", ":", "return", "\"\"" ]
https://github.com/IntelLabs/coach/blob/dea46ae0d22b0a0cd30b9fc138a4a2642e1b9d9d/rl_coach/architectures/tensorflow_components/savers.py#L49-L53
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/pip/_vendor/ipaddress.py
python
_compat_to_bytes
(intval, length, endianess)
[]
def _compat_to_bytes(intval, length, endianess): assert isinstance(intval, _compat_int_types) assert endianess == 'big' if length == 4: if intval < 0 or intval >= 2 ** 32: raise struct.error("integer out of range for 'I' format code") return struct.pack(b'!I', intval) elif length == 16: if intval < 0 or intval >= 2 ** 128: raise struct.error("integer out of range for 'QQ' format code") return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff) else: raise NotImplementedError()
[ "def", "_compat_to_bytes", "(", "intval", ",", "length", ",", "endianess", ")", ":", "assert", "isinstance", "(", "intval", ",", "_compat_int_types", ")", "assert", "endianess", "==", "'big'", "if", "length", "==", "4", ":", "if", "intval", "<", "0", "or", "intval", ">=", "2", "**", "32", ":", "raise", "struct", ".", "error", "(", "\"integer out of range for 'I' format code\"", ")", "return", "struct", ".", "pack", "(", "b'!I'", ",", "intval", ")", "elif", "length", "==", "16", ":", "if", "intval", "<", "0", "or", "intval", ">=", "2", "**", "128", ":", "raise", "struct", ".", "error", "(", "\"integer out of range for 'QQ' format code\"", ")", "return", "struct", ".", "pack", "(", "b'!QQ'", ",", "intval", ">>", "64", ",", "intval", "&", "0xffffffffffffffff", ")", "else", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/_vendor/ipaddress.py#L48-L60
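On Python 3 the same big-endian packing is available natively via int.to_bytes; a quick equivalence check of the 4-byte branch (the sample value is just an illustration):

import struct

value = 3232235777  # 192.168.1.1 as an integer
assert value.to_bytes(4, "big") == struct.pack(b"!I", value) == b"\xc0\xa8\x01\x01"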
quodlibet/quodlibet
e3099c89f7aa6524380795d325cc14630031886c
quodlibet/errorreport/sentrywrapper.py
python
Sentry.add_tag
(self, key, value)
Attach tags to the error report. Args: key (str) value (str) The keys are arbitrary, but some have a special meaning: * "release" will show up as a separate page in sentry * "environment" will add a dropdown for grouping
Attach tags to the error report.
[ "Attach", "tags", "to", "the", "error", "report", "." ]
def add_tag(self, key, value): """Attach tags to the error report. Args: key (str) value (str) The keys are arbitrary, but some have a special meaning: * "release" will show up as a separate page in sentry * "environment" will add a dropdown for grouping """ self._tags[key] = value
[ "def", "add_tag", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "_tags", "[", "key", "]", "=", "value" ]
https://github.com/quodlibet/quodlibet/blob/e3099c89f7aa6524380795d325cc14630031886c/quodlibet/errorreport/sentrywrapper.py#L186-L199
jliljebl/flowblade
995313a509b80e99eb1ad550d945bdda5995093b
flowblade-trunk/Flowblade/dialogs.py
python
_get_kb_row
(msg1, msg2, edit_launch=None)
return row
[]
def _get_kb_row(msg1, msg2, edit_launch=None): label1 = Gtk.Label(label=msg1) label2 = Gtk.Label(label=msg2) if edit_launch == None: widget = Gtk.Label() else: widget = edit_launch.widget edit_launch.set_shortcut_label(label1) KB_SHORTCUT_ROW_WIDTH = 500 KB_SHORTCUT_ROW_HEIGHT = 22 row = guiutils.get_three_column_box(label1, label2, widget, 170, 48) row.set_size_request(KB_SHORTCUT_ROW_WIDTH, KB_SHORTCUT_ROW_HEIGHT) row.show() return row
[ "def", "_get_kb_row", "(", "msg1", ",", "msg2", ",", "edit_launch", "=", "None", ")", ":", "label1", "=", "Gtk", ".", "Label", "(", "label", "=", "msg1", ")", "label2", "=", "Gtk", ".", "Label", "(", "label", "=", "msg2", ")", "if", "edit_launch", "==", "None", ":", "widget", "=", "Gtk", ".", "Label", "(", ")", "else", ":", "widget", "=", "edit_launch", ".", "widget", "edit_launch", ".", "set_shortcut_label", "(", "label1", ")", "KB_SHORTCUT_ROW_WIDTH", "=", "500", "KB_SHORTCUT_ROW_HEIGHT", "=", "22", "row", "=", "guiutils", ".", "get_three_column_box", "(", "label1", ",", "label2", ",", "widget", ",", "170", ",", "48", ")", "row", ".", "set_size_request", "(", "KB_SHORTCUT_ROW_WIDTH", ",", "KB_SHORTCUT_ROW_HEIGHT", ")", "row", ".", "show", "(", ")", "return", "row" ]
https://github.com/jliljebl/flowblade/blob/995313a509b80e99eb1ad550d945bdda5995093b/flowblade-trunk/Flowblade/dialogs.py#L1715-L1730
ApostropheEditor/Apostrophe
cc30858c15f3408296d73202497d3cdef5a46064
apostrophe/text_view_undo_redo_handler.py
python
UndoableInsert.__init__
(self, text_iter, text, length)
[]
def __init__(self, text_iter, text, length): self.offset = text_iter.get_offset() self.text = text self.length = length self.mergeable = not bool( self.length > 1 or self.text in ( "\r", "\n", " "))
[ "def", "__init__", "(", "self", ",", "text_iter", ",", "text", ",", "length", ")", ":", "self", ".", "offset", "=", "text_iter", ".", "get_offset", "(", ")", "self", ".", "text", "=", "text", "self", ".", "length", "=", "length", "self", ".", "mergeable", "=", "not", "bool", "(", "self", ".", "length", ">", "1", "or", "self", ".", "text", "in", "(", "\"\\r\"", ",", "\"\\n\"", ",", "\" \"", ")", ")" ]
https://github.com/ApostropheEditor/Apostrophe/blob/cc30858c15f3408296d73202497d3cdef5a46064/apostrophe/text_view_undo_redo_handler.py#L9-L15
osmr/imgclsmob
f2993d3ce73a2f7ddba05da3891defb08547d504
tensorflow_/tensorflowcv/models/preresnet.py
python
preresnet18
(**kwargs)
return get_preresnet(blocks=18, model_name="preresnet18", **kwargs)
PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns: ------- functor Functor for model graph creation with extra fields.
PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
[ "PreResNet", "-", "18", "model", "from", "Identity", "Mappings", "in", "Deep", "Residual", "Networks", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1603", ".", "05027", "." ]
def preresnet18(**kwargs): """ PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns: ------- functor Functor for model graph creation with extra fields. """ return get_preresnet(blocks=18, model_name="preresnet18", **kwargs)
[ "def", "preresnet18", "(", "*", "*", "kwargs", ")", ":", "return", "get_preresnet", "(", "blocks", "=", "18", ",", "model_name", "=", "\"preresnet18\"", ",", "*", "*", "kwargs", ")" ]
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/tensorflow_/tensorflowcv/models/preresnet.py#L665-L681
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/plotting/pygletplot/color_scheme.py
python
ColorScheme.apply_to_curve
(self, verts, u_set, set_len=None, inc_pos=None)
return cverts
Apply this color scheme to a set of vertices over a single independent variable u.
Apply this color scheme to a set of vertices over a single independent variable u.
[ "Apply", "this", "color", "scheme", "to", "a", "set", "of", "vertices", "over", "a", "single", "independent", "variable", "u", "." ]
def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None): """ Apply this color scheme to a set of vertices over a single independent variable u. """ bounds = create_bounds() cverts = list() if callable(set_len): set_len(len(u_set)*2) # calculate f() = r,g,b for each vert # and find the min and max for r,g,b for _u in range(len(u_set)): if verts[_u] is None: cverts.append(None) else: x, y, z = verts[_u] u, v = u_set[_u], None c = self(x, y, z, u, v) if c is not None: c = list(c) update_bounds(bounds, c) cverts.append(c) if callable(inc_pos): inc_pos() # scale and apply gradient for _u in range(len(u_set)): if cverts[_u] is not None: for _c in range(3): # scale from [f_min, f_max] to [0,1] cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1], cverts[_u][_c]) # apply gradient cverts[_u] = self.gradient(*cverts[_u]) if callable(inc_pos): inc_pos() return cverts
[ "def", "apply_to_curve", "(", "self", ",", "verts", ",", "u_set", ",", "set_len", "=", "None", ",", "inc_pos", "=", "None", ")", ":", "bounds", "=", "create_bounds", "(", ")", "cverts", "=", "list", "(", ")", "if", "callable", "(", "set_len", ")", ":", "set_len", "(", "len", "(", "u_set", ")", "*", "2", ")", "# calculate f() = r,g,b for each vert", "# and find the min and max for r,g,b", "for", "_u", "in", "range", "(", "len", "(", "u_set", ")", ")", ":", "if", "verts", "[", "_u", "]", "is", "None", ":", "cverts", ".", "append", "(", "None", ")", "else", ":", "x", ",", "y", ",", "z", "=", "verts", "[", "_u", "]", "u", ",", "v", "=", "u_set", "[", "_u", "]", ",", "None", "c", "=", "self", "(", "x", ",", "y", ",", "z", ",", "u", ",", "v", ")", "if", "c", "is", "not", "None", ":", "c", "=", "list", "(", "c", ")", "update_bounds", "(", "bounds", ",", "c", ")", "cverts", ".", "append", "(", "c", ")", "if", "callable", "(", "inc_pos", ")", ":", "inc_pos", "(", ")", "# scale and apply gradient", "for", "_u", "in", "range", "(", "len", "(", "u_set", ")", ")", ":", "if", "cverts", "[", "_u", "]", "is", "not", "None", ":", "for", "_c", "in", "range", "(", "3", ")", ":", "# scale from [f_min, f_max] to [0,1]", "cverts", "[", "_u", "]", "[", "_c", "]", "=", "rinterpolate", "(", "bounds", "[", "_c", "]", "[", "0", "]", ",", "bounds", "[", "_c", "]", "[", "1", "]", ",", "cverts", "[", "_u", "]", "[", "_c", "]", ")", "# apply gradient", "cverts", "[", "_u", "]", "=", "self", ".", "gradient", "(", "*", "cverts", "[", "_u", "]", ")", "if", "callable", "(", "inc_pos", ")", ":", "inc_pos", "(", ")", "return", "cverts" ]
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/plotting/pygletplot/color_scheme.py#L231-L267
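apply_to_curve leans on rinterpolate for the [f_min, f_max] -> [0, 1] scaling noted in its comments; a minimal sketch of what that helper presumably does (the real sympy utility may guard edge cases differently):

def rinterpolate(a_min, a_max, value):
    # reverse interpolation: position of value inside [a_min, a_max]
    span = a_max - a_min
    if span == 0:
        span = 1.0  # assumed guard against a degenerate range
    return (value - a_min) / span

assert rinterpolate(0.0, 10.0, 2.5) == 0.25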
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v1_mutating_webhook_configuration.py
python
V1MutatingWebhookConfiguration.__ne__
(self, other)
return self.to_dict() != other.to_dict()
Returns true if both objects are not equal
Returns true if both objects are not equal
[ "Returns", "true", "if", "both", "objects", "are", "not", "equal" ]
def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1MutatingWebhookConfiguration): return True return self.to_dict() != other.to_dict()
[ "def", "__ne__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "V1MutatingWebhookConfiguration", ")", ":", "return", "True", "return", "self", ".", "to_dict", "(", ")", "!=", "other", ".", "to_dict", "(", ")" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_mutating_webhook_configuration.py#L199-L204
istresearch/scrapy-cluster
01861c2dca1563aab740417d315cc4ebf9b73f72
crawler/crawling/distributed_scheduler.py
python
DistributedScheduler.expire_queues
(self)
Expires old queue_dict keys that have not been used in a long time. Prevents slow memory build up when crawling lots of different domains
Expires old queue_dict keys that have not been used in a long time. Prevents slow memory build up when crawling lots of different domains
[ "Expires", "old", "queue_dict", "keys", "that", "have", "not", "been", "used", "in", "a", "long", "time", ".", "Prevents", "slow", "memory", "build", "up", "when", "crawling", "lots", "of", "different", "domains" ]
def expire_queues(self): ''' Expires old queue_dict keys that have not been used in a long time. Prevents slow memory build up when crawling lots of different domains ''' curr_time = time.time() for key in list(self.queue_dict): diff = curr_time - self.queue_dict[key][1] if diff > self.queue_timeout: self.logger.debug("Expiring domain queue key " + key) del self.queue_dict[key] if key in self.queue_keys: self.queue_keys.remove(key)
[ "def", "expire_queues", "(", "self", ")", ":", "curr_time", "=", "time", ".", "time", "(", ")", "for", "key", "in", "list", "(", "self", ".", "queue_dict", ")", ":", "diff", "=", "curr_time", "-", "self", ".", "queue_dict", "[", "key", "]", "[", "1", "]", "if", "diff", ">", "self", ".", "queue_timeout", ":", "self", ".", "logger", ".", "debug", "(", "\"Expiring domain queue key \"", "+", "key", ")", "del", "self", ".", "queue_dict", "[", "key", "]", "if", "key", "in", "self", ".", "queue_keys", ":", "self", ".", "queue_keys", ".", "remove", "(", "key", ")" ]
https://github.com/istresearch/scrapy-cluster/blob/01861c2dca1563aab740417d315cc4ebf9b73f72/crawler/crawling/distributed_scheduler.py#L260-L272
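The bookkeeping implied above is queue_dict[key] == (queue, last_access_time); a self-contained sketch of the same time-based expiry pattern (names and timeout are placeholders):

import time

queue_timeout = 300  # seconds
queue_dict = {"example.com": (object(), time.time() - 600)}  # stale entry

for key in list(queue_dict):  # list() lets us delete while iterating
    if time.time() - queue_dict[key][1] > queue_timeout:
        del queue_dict[key]

assert "example.com" not in queue_dict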
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/vod/v20180717/models.py
python
DeleteAIRecognitionTemplateResponse.__init__
(self)
r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
[ "r", ":", "param", "RequestId", ":", "唯一请求", "ID,每次请求都会返回。定位问题时需要提供该次请求的", "RequestId。", ":", "type", "RequestId", ":", "str" ]
def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "RequestId", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/vod/v20180717/models.py#L7411-L7416
Azure/blobxfer
c6c6c143e8ee413d09a1110abafdb92e9e8afc39
blobxfer/models/synccopy.py
python
Descriptor.last_block_num
(self)
Last used block number for block id, should only be called for finalize operation :param Descriptor self: this :rtype: int :return: block number
Last used block number for block id, should only be called for finalize operation :param Descriptor self: this :rtype: int :return: block number
[ "Last", "used", "block", "number", "for", "block", "id", "should", "only", "be", "called", "for", "finalize", "operation", ":", "param", "Descriptor", "self", ":", "this", ":", "rtype", ":", "int", ":", "return", ":", "block", "number" ]
def last_block_num(self): # type: (Descriptor) -> int """Last used block number for block id, should only be called for finalize operation :param Descriptor self: this :rtype: int :return: block number """ with self._meta_lock: return self._chunk_num - 1
[ "def", "last_block_num", "(", "self", ")", ":", "# type: (Descriptor) -> int", "with", "self", ".", "_meta_lock", ":", "return", "self", ".", "_chunk_num", "-", "1" ]
https://github.com/Azure/blobxfer/blob/c6c6c143e8ee413d09a1110abafdb92e9e8afc39/blobxfer/models/synccopy.py#L175-L184
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/lxd.py
python
container_stop
( name, timeout=30, force=True, remote_addr=None, cert=None, key=None, verify_cert=True, )
return _pylxd_model_to_dict(container)
Stop a container name : Name of the container to stop remote_addr : A URL to a remote server; you also have to give cert and key if you provide remote_addr and it's a TCP address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM-formatted SSL certificate. Examples: ~/.config/lxc/client.crt key : PEM-formatted SSL key. Examples: ~/.config/lxc/client.key verify_cert : True Whether to verify the cert; this is True by default, but in most cases you want to turn it off, as LXD normally uses self-signed certificates.
Stop a container
[ "Stop", "a", "container" ]
def container_stop( name, timeout=30, force=True, remote_addr=None, cert=None, key=None, verify_cert=True, ): """ Stop a container name : Name of the container to stop remote_addr : A URL to a remote server; you also have to give cert and key if you provide remote_addr and it's a TCP address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM-formatted SSL certificate. Examples: ~/.config/lxc/client.crt key : PEM-formatted SSL key. Examples: ~/.config/lxc/client.key verify_cert : True Whether to verify the cert; this is True by default, but in most cases you want to turn it off, as LXD normally uses self-signed certificates. """ container = container_get(name, remote_addr, cert, key, verify_cert, _raw=True) container.stop(timeout, force, wait=True) return _pylxd_model_to_dict(container)
[ "def", "container_stop", "(", "name", ",", "timeout", "=", "30", ",", "force", "=", "True", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ",", ")", ":", "container", "=", "container_get", "(", "name", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "container", ".", "stop", "(", "timeout", ",", "force", ",", "wait", "=", "True", ")", "return", "_pylxd_model_to_dict", "(", "container", ")" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/lxd.py#L939-L981
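A hypothetical direct call, using only parameters documented above (the address and the cert/key paths are placeholders):

result = container_stop(
    "mycontainer",
    timeout=30,
    force=True,
    remote_addr="https://myserver.lan:8443",
    cert="~/.config/lxc/client.crt",
    key="~/.config/lxc/client.key",
    verify_cert=False,
)
# result is the stopped container serialized to a plain dict by _pylxd_model_to_dict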
google/grr
8ad8a4d2c5a93c92729206b7771af19d92d4f915
grr/server/grr_response_server/rdfvalues/objects.py
python
PathInfo.root
(self)
return not self.components
[]
def root(self): return not self.components
[ "def", "root", "(", "self", ")", ":", "return", "not", "self", ".", "components" ]
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/rdfvalues/objects.py#L424-L425
jeffkit/wechat
95510106605e3870e81d7b2ea08ef7868b01d3bf
wechat/official.py
python
WxApplication.on_location_select
(self, event)
return WxEmptyResponse()
[]
def on_location_select(self, event): return WxEmptyResponse()
[ "def", "on_location_select", "(", "self", ",", "event", ")", ":", "return", "WxEmptyResponse", "(", ")" ]
https://github.com/jeffkit/wechat/blob/95510106605e3870e81d7b2ea08ef7868b01d3bf/wechat/official.py#L166-L167
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
examples/showoci/showoci_data.py
python
ShowOCIData.__get_database_mysql
(self, region_name, compartment)
[]
def __get_database_mysql(self, region_name, compartment): data = [] try: mysql = self.service.search_multi_items(self.service.C_DATABASE, self.service.C_DATABASE_MYSQL, 'region_name', region_name, 'compartment_id', compartment['id']) if mysql: for dbs in mysql: # Add subnet if dbs['subnet_id'] != 'None': dbs['subnet_name'] = self.__get_core_network_subnet_name(dbs['subnet_id']) else: dbs['subnet_name'] = "" data.append(dbs) return data except Exception as e: self.__print_error("__get_database_mysql", e) return data
[ "def", "__get_database_mysql", "(", "self", ",", "region_name", ",", "compartment", ")", ":", "data", "=", "[", "]", "try", ":", "mysql", "=", "self", ".", "service", ".", "search_multi_items", "(", "self", ".", "service", ".", "C_DATABASE", ",", "self", ".", "service", ".", "C_DATABASE_MYSQL", ",", "'region_name'", ",", "region_name", ",", "'compartment_id'", ",", "compartment", "[", "'id'", "]", ")", "if", "mysql", ":", "for", "dbs", "in", "mysql", ":", "# Add subnet", "if", "dbs", "[", "'subnet_id'", "]", "!=", "'None'", ":", "dbs", "[", "'subnet_name'", "]", "=", "self", ".", "__get_core_network_subnet_name", "(", "dbs", "[", "'subnet_id'", "]", ")", "else", ":", "dbs", "[", "'subnet_name'", "]", "=", "\"\"", "data", ".", "append", "(", "dbs", ")", "return", "data", "except", "Exception", "as", "e", ":", "self", ".", "__print_error", "(", "\"__get_database_mysql\"", ",", "e", ")", "return", "data" ]
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/examples/showoci/showoci_data.py#L2635-L2652
skylander86/lambda-text-extractor
6da52d077a2fc571e38bfe29c33ae68f6443cd5a
lib-linux_x64/pptx/chart/axis.py
python
ValueAxis.crosses
(self)
return crosses.val
Member of :ref:`XlAxisCrosses` enumeration specifying the point on this axis where the other axis crosses, such as auto/zero, minimum, or maximum. Returns `XL_AXIS_CROSSES.CUSTOM` when a specific numeric crossing point (e.g. 1.5) is defined.
Member of :ref:`XlAxisCrosses` enumeration specifying the point on this axis where the other axis crosses, such as auto/zero, minimum, or maximum. Returns `XL_AXIS_CROSSES.CUSTOM` when a specific numeric crossing point (e.g. 1.5) is defined.
[ "Member", "of", ":", "ref", ":", "XlAxisCrosses", "enumeration", "specifying", "the", "point", "on", "this", "axis", "where", "the", "other", "axis", "crosses", "such", "as", "auto", "/", "zero", "minimum", "or", "maximum", ".", "Returns", "XL_AXIS_CROSSES", ".", "CUSTOM", "when", "a", "specific", "numeric", "crossing", "point", "(", "e", ".", "g", ".", "1", ".", "5", ")", "is", "defined", "." ]
def crosses(self): """ Member of :ref:`XlAxisCrosses` enumeration specifying the point on this axis where the other axis crosses, such as auto/zero, minimum, or maximum. Returns `XL_AXIS_CROSSES.CUSTOM` when a specific numeric crossing point (e.g. 1.5) is defined. """ crosses = self._cross_xAx.crosses if crosses is None: return XL_AXIS_CROSSES.CUSTOM return crosses.val
[ "def", "crosses", "(", "self", ")", ":", "crosses", "=", "self", ".", "_cross_xAx", ".", "crosses", "if", "crosses", "is", "None", ":", "return", "XL_AXIS_CROSSES", ".", "CUSTOM", "return", "crosses", ".", "val" ]
https://github.com/skylander86/lambda-text-extractor/blob/6da52d077a2fc571e38bfe29c33ae68f6443cd5a/lib-linux_x64/pptx/chart/axis.py#L419-L429
benanne/morb
143fef001a747308a9a3ef775615911b7e64081f
examples/utils.py
python
generate_data
(N)
return u
Creates a noisy dataset with some simple pattern in it.
Creates a noisy dataset with some simple pattern in it.
[ "Creates", "a", "noisy", "dataset", "with", "some", "simple", "pattern", "in", "it", "." ]
def generate_data(N): """Creates a noisy dataset with some simple pattern in it.""" T = N * 38 u = np.mat(np.zeros((T, 20))) for i in range(1, T, 38): if i % 76 == 1: u[i - 1:i + 19, :] = np.eye(20) u[i + 18:i + 38, :] = np.eye(20)[np.arange(19, -1, -1)] u[i - 1:i + 19, :] += np.eye(20)[np.arange(19, -1, -1)] else: u[i - 1:i + 19, 1] = 1 u[i + 18:i + 38, 8] = 1 return u
[ "def", "generate_data", "(", "N", ")", ":", "T", "=", "N", "*", "38", "u", "=", "np", ".", "mat", "(", "np", ".", "zeros", "(", "(", "T", ",", "20", ")", ")", ")", "for", "i", "in", "range", "(", "1", ",", "T", ",", "38", ")", ":", "if", "i", "%", "76", "==", "1", ":", "u", "[", "i", "-", "1", ":", "i", "+", "19", ",", ":", "]", "=", "np", ".", "eye", "(", "20", ")", "u", "[", "i", "+", "18", ":", "i", "+", "38", ",", ":", "]", "=", "np", ".", "eye", "(", "20", ")", "[", "np", ".", "arange", "(", "19", ",", "-", "1", ",", "-", "1", ")", "]", "u", "[", "i", "-", "1", ":", "i", "+", "19", ",", ":", "]", "+=", "np", ".", "eye", "(", "20", ")", "[", "np", ".", "arange", "(", "19", ",", "-", "1", ",", "-", "1", ")", "]", "else", ":", "u", "[", "i", "-", "1", ":", "i", "+", "19", ",", "1", "]", "=", "1", "u", "[", "i", "+", "18", ":", "i", "+", "38", ",", "8", "]", "=", "1", "return", "u" ]
https://github.com/benanne/morb/blob/143fef001a747308a9a3ef775615911b7e64081f/examples/utils.py#L5-L17
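A quick sanity check of the generator above (assuming it and numpy are in scope): N=2 yields T = 2 * 38 = 76 rows of 20 columns, one block per 38-step pattern.

u = generate_data(2)
assert u.shape == (76, 20)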
fake-name/ReadableWebProxy
ed5c7abe38706acc2684a1e6cd80242a03c5f010
WebMirror/management/rss_parser_funcs/feed_parse_extractFaerytranslationsWordpressCom.py
python
extractFaerytranslationsWordpressCom
(item)
return False
Parser for 'faerytranslations.wordpress.com'
Parser for 'faerytranslations.wordpress.com'
[ "Parser", "for", "faerytranslations", ".", "wordpress", ".", "com" ]
def extractFaerytranslationsWordpressCom(item): ''' Parser for 'faerytranslations.wordpress.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('bnddsb', 'brother next door, don\'t sleep on my bed', 'translated'), ('brother next door, don\'t sleep on my bed', 'brother next door, don\'t sleep on my bed', 'translated'), ('DS', 'demon\'s sweetheart', 'translated'), ('demon\'s sweetheart', 'demon\'s sweetheart', 'translated'), ('trwnla', 'the rich woman is no longer acting', 'translated'), ('the rich woman is no longer acting', 'the rich woman is no longer acting', 'translated'), ('tvreg', 'the villain\'s reborn ex-girlfriend', 'translated'), ('the villain\'s reborn ex-girlfriend', 'the villain\'s reborn ex-girlfriend', 'translated'), ('sdwz', 'splendid dream of wanzhou', 'translated'), ('splendid dream of wanzhou', 'splendid dream of wanzhou', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
[ "def", "extractFaerytranslationsWordpressCom", "(", "item", ")", ":", "vol", ",", "chp", ",", "frag", ",", "postfix", "=", "extractVolChapterFragmentPostfix", "(", "item", "[", "'title'", "]", ")", "if", "not", "(", "chp", "or", "vol", ")", "or", "\"preview\"", "in", "item", "[", "'title'", "]", ".", "lower", "(", ")", ":", "return", "None", "tagmap", "=", "[", "(", "'bnddsb'", ",", "'brother next door, don\\'t sleep on my bed'", ",", "'translated'", ")", ",", "(", "'brother next door, don\\'t sleep on my bed'", ",", "'brother next door, don\\'t sleep on my bed'", ",", "'translated'", ")", ",", "(", "'DS'", ",", "'demon\\'s sweetheart'", ",", "'translated'", ")", ",", "(", "'demon\\'s sweetheart'", ",", "'demon\\'s sweetheart'", ",", "'translated'", ")", ",", "(", "'trwnla'", ",", "'the rich woman is no longer acting'", ",", "'translated'", ")", ",", "(", "'the rich woman is no longer acting'", ",", "'the rich woman is no longer acting'", ",", "'translated'", ")", ",", "(", "'tvreg'", ",", "'the villain\\'s reborn ex-girlfriend'", ",", "'translated'", ")", ",", "(", "'the villain\\'s reborn ex-girlfriend'", ",", "'the villain\\'s reborn ex-girlfriend'", ",", "'translated'", ")", ",", "(", "'sdwz'", ",", "'splendid dream of wanzhou'", ",", "'translated'", ")", ",", "(", "'splendid dream of wanzhou'", ",", "'splendid dream of wanzhou'", ",", "'translated'", ")", ",", "(", "'PRC'", ",", "'PRC'", ",", "'translated'", ")", ",", "(", "'Loiterous'", ",", "'Loiterous'", ",", "'oel'", ")", ",", "]", "for", "tagname", ",", "name", ",", "tl_type", "in", "tagmap", ":", "if", "tagname", "in", "item", "[", "'tags'", "]", ":", "return", "buildReleaseMessageWithType", "(", "item", ",", "name", ",", "vol", ",", "chp", ",", "frag", "=", "frag", ",", "postfix", "=", "postfix", ",", "tl_type", "=", "tl_type", ")", "return", "False" ]
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractFaerytranslationsWordpressCom.py#L1-L30
sublimelsp/LSP
19a01aa045de04bcc805e56043923656548050e0
plugin/core/signature_help.py
python
SigHelp.context
(self, trigger_kind: int, trigger_character: str, is_retrigger: bool)
return { "triggerKind": trigger_kind, "triggerCharacter": trigger_character, "isRetrigger": is_retrigger, "activeSignatureHelp": self._state }
Extract the state out of this state machine to send back to the language server. XXX: Currently unused. Revisit this some time in the future.
Extract the state out of this state machine to send back to the language server.
[ "Extract", "the", "state", "out", "of", "this", "state", "machine", "to", "send", "back", "to", "the", "language", "server", "." ]
def context(self, trigger_kind: int, trigger_character: str, is_retrigger: bool) -> SignatureHelpContext: """ Extract the state out of this state machine to send back to the language server. XXX: Currently unused. Revisit this some time in the future. """ self._state["activeSignature"] = self._active_signature_index return { "triggerKind": trigger_kind, "triggerCharacter": trigger_character, "isRetrigger": is_retrigger, "activeSignatureHelp": self._state }
[ "def", "context", "(", "self", ",", "trigger_kind", ":", "int", ",", "trigger_character", ":", "str", ",", "is_retrigger", ":", "bool", ")", "->", "SignatureHelpContext", ":", "self", ".", "_state", "[", "\"activeSignature\"", "]", "=", "self", ".", "_active_signature_index", "return", "{", "\"triggerKind\"", ":", "trigger_kind", ",", "\"triggerCharacter\"", ":", "trigger_character", ",", "\"isRetrigger\"", ":", "is_retrigger", ",", "\"activeSignatureHelp\"", ":", "self", ".", "_state", "}" ]
https://github.com/sublimelsp/LSP/blob/19a01aa045de04bcc805e56043923656548050e0/plugin/core/signature_help.py#L76-L88
harpribot/deep-summarization
9b3bb1daae11a1db2386dbe4a71848714e6127f8
models/sequenceNet.py
python
NeuralNet.set_parameters
(self, train_batch_size, test_batch_size, memory_dim, learning_rate)
Set the parameters for the model and training. :param train_batch_size: The batch size of examples used for batch training :param test_batch_size: The batch size of test examples used for testing :param memory_dim: The length of the hidden vector produced by the encoder :param learning_rate: The learning rate for Stochastic Gradient Descent :return: None
Set the parameters for the model and training.
[ "Set", "the", "parameters", "for", "the", "model", "and", "training", "." ]
def set_parameters(self, train_batch_size, test_batch_size, memory_dim, learning_rate): """ Set the parameters for the model and training. :param train_batch_size: The batch size of examples used for batch training :param test_batch_size: The batch size of test examples used for testing :param memory_dim: The length of the hidden vector produced by the encoder :param learning_rate: The learning rate for Stochastic Gradient Descent :return: None """ self.train_batch_size = train_batch_size self.test_batch_size = test_batch_size self.memory_dim = memory_dim self.learning_rate = learning_rate
[ "def", "set_parameters", "(", "self", ",", "train_batch_size", ",", "test_batch_size", ",", "memory_dim", ",", "learning_rate", ")", ":", "self", ".", "train_batch_size", "=", "train_batch_size", "self", ".", "test_batch_size", "=", "test_batch_size", "self", ".", "memory_dim", "=", "memory_dim", "self", ".", "learning_rate", "=", "learning_rate" ]
https://github.com/harpribot/deep-summarization/blob/9b3bb1daae11a1db2386dbe4a71848714e6127f8/models/sequenceNet.py#L34-L47
OpenEndedGroup/Field
4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c
Contents/lib/python/decimal.py
python
Context.to_integral_exact
(self, a)
return a.to_integral_exact(context=self)
Rounds to an integer. When the operand has a negative exponent, the result is the same as using the quantize() operation using the given operand as the left-hand-operand, 1E+0 as the right-hand-operand, and the precision of the operand as the precision setting; Inexact and Rounded flags are allowed in this operation. The rounding mode is taken from the context. >>> ExtendedContext.to_integral_exact(Decimal('2.1')) Decimal("2") >>> ExtendedContext.to_integral_exact(Decimal('100')) Decimal("100") >>> ExtendedContext.to_integral_exact(Decimal('100.0')) Decimal("100") >>> ExtendedContext.to_integral_exact(Decimal('101.5')) Decimal("102") >>> ExtendedContext.to_integral_exact(Decimal('-101.5')) Decimal("-102") >>> ExtendedContext.to_integral_exact(Decimal('10E+5')) Decimal("1.0E+6") >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77')) Decimal("7.89E+77") >>> ExtendedContext.to_integral_exact(Decimal('-Inf')) Decimal("-Infinity")
Rounds to an integer.
[ "Rounds", "to", "an", "integer", "." ]
def to_integral_exact(self, a): """Rounds to an integer. When the operand has a negative exponent, the result is the same as using the quantize() operation using the given operand as the left-hand-operand, 1E+0 as the right-hand-operand, and the precision of the operand as the precision setting; Inexact and Rounded flags are allowed in this operation. The rounding mode is taken from the context. >>> ExtendedContext.to_integral_exact(Decimal('2.1')) Decimal("2") >>> ExtendedContext.to_integral_exact(Decimal('100')) Decimal("100") >>> ExtendedContext.to_integral_exact(Decimal('100.0')) Decimal("100") >>> ExtendedContext.to_integral_exact(Decimal('101.5')) Decimal("102") >>> ExtendedContext.to_integral_exact(Decimal('-101.5')) Decimal("-102") >>> ExtendedContext.to_integral_exact(Decimal('10E+5')) Decimal("1.0E+6") >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77')) Decimal("7.89E+77") >>> ExtendedContext.to_integral_exact(Decimal('-Inf')) Decimal("-Infinity") """ return a.to_integral_exact(context=self)
[ "def", "to_integral_exact", "(", "self", ",", "a", ")", ":", "return", "a", ".", "to_integral_exact", "(", "context", "=", "self", ")" ]
https://github.com/OpenEndedGroup/Field/blob/4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c/Contents/lib/python/decimal.py#L4612-L4639
OpenWaterAnalytics/pyswmm
a73409b2a9ebf4131ba42fc4c03a69453fc29d9a
pyswmm/output.py
python
Output.link_attribute
( self, attribute: shared_enum.LinkAttribute, time_index: Union[int, datetime, None] = None, )
return {link: value for link, value in zip(self.links, values)}
For all links at given time, get a particular attribute. :param attribute: attribute from swmm.toolkit.shared_enum.LinkAttribute: FLOW_RATE, FLOW_DEPTH, FLOW_VELOCITY, FLOW_VOLUME, CAPACITY, POLLUT_CONC_0 :type attribute: swmm.toolkit.shared_enum.LinkAttribute :param time_index: datetime or simulation index, defaults to None :type time_index: Union[int, datetime, None] :returns: dict of attribute values for all nodes at given timestep :rtype: dict {link : value} Examples: >>> from swmm.toolkit.shared_enum import LinkAttribute >>> from pyswmm import Output >>> >>> with Output('tests/data/model_full_features.out') as out: ... data = out.link_attribute(LinkAttribute.FLOW_RATE, datetime(2015, 11, 1, 16)) ... for object in data: ... print(object, data[object]) >>> C1:C2 7.218499660491943 >>> C2 8.227274894714355 >>> C3 9.240239143371582
For all links at given time, get a particular attribute.
[ "For", "all", "links", "at", "given", "time", "get", "a", "particular", "attribute", "." ]
def link_attribute( self, attribute: shared_enum.LinkAttribute, time_index: Union[int, datetime, None] = None, ): """ For all links at given time, get a particular attribute. :param attribute: attribute from swmm.toolkit.shared_enum.LinkAttribute: FLOW_RATE, FLOW_DEPTH, FLOW_VELOCITY, FLOW_VOLUME, CAPACITY, POLLUT_CONC_0 :type attribute: swmm.toolkit.shared_enum.LinkAttribute :param time_index: datetime or simulation index, defaults to None :type time_index: Union[int, datetime, None] :returns: dict of attribute values for all nodes at given timestep :rtype: dict {link : value} Examples: >>> from swmm.toolkit.shared_enum import LinkAttribute >>> from pyswmm import Output >>> >>> with Output('tests/data/model_full_features.out') as out: ... data = out.link_attribute(LinkAttribute.FLOW_RATE, datetime(2015, 11, 1, 16)) ... for object in data: ... print(object, data[object]) >>> C1:C2 7.218499660491943 >>> C2 8.227274894714355 >>> C3 9.240239143371582 """ time_index = self.verify_time( time_index, self.times, self.start, self.end, self.report, 0 ) values = output.get_link_attribute(self.handle, time_index, attribute) return {link: value for link, value in zip(self.links, values)}
[ "def", "link_attribute", "(", "self", ",", "attribute", ":", "shared_enum", ".", "LinkAttribute", ",", "time_index", ":", "Union", "[", "int", ",", "datetime", ",", "None", "]", "=", "None", ",", ")", ":", "time_index", "=", "self", ".", "verify_time", "(", "time_index", ",", "self", ".", "times", ",", "self", ".", "start", ",", "self", ".", "end", ",", "self", ".", "report", ",", "0", ")", "values", "=", "output", ".", "get_link_attribute", "(", "self", ".", "handle", ",", "time_index", ",", "attribute", ")", "return", "{", "link", ":", "value", "for", "link", ",", "value", "in", "zip", "(", "self", ".", "links", ",", "values", ")", "}" ]
https://github.com/OpenWaterAnalytics/pyswmm/blob/a73409b2a9ebf4131ba42fc4c03a69453fc29d9a/pyswmm/output.py#L761-L796
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/Django/django/contrib/localflavor/pl/forms.py
python
PLPESELField.has_valid_checksum
(self, number)
return result % 10 == 0
Calculates a checksum with the provided algorithm.
Calculates a checksum with the provided algorithm.
[ "Calculates", "a", "checksum", "with", "the", "provided", "algorithm", "." ]
def has_valid_checksum(self, number): """ Calculates a checksum with the provided algorithm. """ multiple_table = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1) result = 0 for i in range(len(number)): result += int(number[i]) * multiple_table[i] return result % 10 == 0
[ "def", "has_valid_checksum", "(", "self", ",", "number", ")", ":", "multiple_table", "=", "(", "1", ",", "3", ",", "7", ",", "9", ",", "1", ",", "3", ",", "7", ",", "9", ",", "1", ",", "3", ",", "1", ")", "result", "=", "0", "for", "i", "in", "range", "(", "len", "(", "number", ")", ")", ":", "result", "+=", "int", "(", "number", "[", "i", "]", ")", "*", "multiple_table", "[", "i", "]", "return", "result", "%", "10", "==", "0" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/Django/django/contrib/localflavor/pl/forms.py#L58-L66
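A worked run of the weighting above on the commonly cited sample number 44051401359 (purely an illustration):

multiple_table = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
number = "44051401359"
total = sum(int(d) * w for d, w in zip(number, multiple_table))
assert total == 110      # 4*1 + 4*3 + 0*7 + 5*9 + 1*1 + 4*3 + 0*7 + 1*9 + 3*1 + 5*3 + 9*1
assert total % 10 == 0   # so has_valid_checksum() accepts the number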
X-Plane/XPlane2Blender
5face69fbb828a414b167dcb063fabf5182a54bc
io_xplane2blender/xplane_utils/xplane_commands_txt_parser.py
python
CommandInfoStruct.is_invalid
(self)
Returns "" for no errors or a string describing the issue
Returns "" for no errors or a string describing the issue
[ "Returns", "for", "no", "errors", "or", "a", "string", "describing", "the", "issue" ]
def is_invalid(self)->str: '''Returns "" for no errors or a string describing the issue''' if self.command == "": return "Command must be one or more non-whitespace characters" return ""
[ "def", "is_invalid", "(", "self", ")", "->", "str", ":", "if", "self", ".", "command", "==", "\"\"", ":", "return", "\"Command must be one or more non-whitespace characters\"", "return", "\"\"" ]
https://github.com/X-Plane/XPlane2Blender/blob/5face69fbb828a414b167dcb063fabf5182a54bc/io_xplane2blender/xplane_utils/xplane_commands_txt_parser.py#L40-L43
google-research/motion_imitation
d0e7b963c5a301984352d25a3ee0820266fa4218
mpc_controller/a1_sim.py
python
SimpleRobot.GetFootContacts
(self)
return contacts
[]
def GetFootContacts(self): all_contacts = self.pybullet_client.getContactPoints(bodyA=self.quadruped) contacts = [False, False, False, False] for contact in all_contacts: # Ignore self contacts if contact[_BODY_B_FIELD_NUMBER] == self.quadruped: continue try: toe_link_index = self._foot_link_ids.index( contact[_LINK_A_FIELD_NUMBER]) contacts[toe_link_index] = True except ValueError: continue return contacts
[ "def", "GetFootContacts", "(", "self", ")", ":", "all_contacts", "=", "self", ".", "pybullet_client", ".", "getContactPoints", "(", "bodyA", "=", "self", ".", "quadruped", ")", "contacts", "=", "[", "False", ",", "False", ",", "False", ",", "False", "]", "for", "contact", "in", "all_contacts", ":", "# Ignore self contacts", "if", "contact", "[", "_BODY_B_FIELD_NUMBER", "]", "==", "self", ".", "quadruped", ":", "continue", "try", ":", "toe_link_index", "=", "self", ".", "_foot_link_ids", ".", "index", "(", "contact", "[", "_LINK_A_FIELD_NUMBER", "]", ")", "contacts", "[", "toe_link_index", "]", "=", "True", "except", "ValueError", ":", "continue", "return", "contacts" ]
https://github.com/google-research/motion_imitation/blob/d0e7b963c5a301984352d25a3ee0820266fa4218/mpc_controller/a1_sim.py#L498-L512
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/model_selection/data_split/data_split.py
python
DataSplitter.callback_count_info
(self, id_train, id_validate, id_test, all_metas)
return all_metas
Tool to callback returned data count & ratio information Parameters ---------- id_train: list, id of data set id_validate: list, id of data set id_test: list, id of data set all_metas: dict, all meta info Returns ------- None
Tool to callback returned data count & ratio information Parameters ---------- id_train: list, id of data set id_validate: list, id of data set id_test: list, id of data set all_metas: dict, all meta info
[ "Tool", "to", "callback", "returned", "data", "count", "&", "ratio", "information", "Parameters", "----------", "id_train", ":", "list", "id", "of", "data", "set", "id_validate", ":", "list", "id", "of", "data", "set", "id_test", ":", "list", "id", "of", "data", "set", "all_metas", ":", "dict", "all", "meta", "info" ]
def callback_count_info(self, id_train, id_validate, id_test, all_metas): """ Tool to callback returned data count & ratio information Parameters ---------- id_train: list, id of data set id_validate: list, id of data set id_test: list, id of data set all_metas: dict, all meta info Returns ------- None """ metas = {} train_count = len(id_train) metas["train"] = train_count validate_count = len(id_validate) metas["validate"] = validate_count test_count = len(id_test) metas["test"] = test_count original_count = train_count + validate_count + test_count metas["original"] = original_count metric_name = f"{self.metric_name}_count_info" all_metas[metric_name] = metas metas = {} train_ratio = train_count / original_count validate_ratio = validate_count / original_count test_ratio = test_count / original_count metas["train"] = round(train_ratio, ROUND_NUM) metas["validate"] = round(validate_ratio, ROUND_NUM) metas["test"] = round(test_ratio, ROUND_NUM) metric_name = f"{self.metric_name}_ratio_info" all_metas[metric_name] = metas # stratified all_metas["stratified"] = self.stratified return all_metas
[ "def", "callback_count_info", "(", "self", ",", "id_train", ",", "id_validate", ",", "id_test", ",", "all_metas", ")", ":", "metas", "=", "{", "}", "train_count", "=", "len", "(", "id_train", ")", "metas", "[", "\"train\"", "]", "=", "train_count", "validate_count", "=", "len", "(", "id_validate", ")", "metas", "[", "\"validate\"", "]", "=", "validate_count", "test_count", "=", "len", "(", "id_test", ")", "metas", "[", "\"test\"", "]", "=", "test_count", "original_count", "=", "train_count", "+", "validate_count", "+", "test_count", "metas", "[", "\"original\"", "]", "=", "original_count", "metric_name", "=", "f\"{self.metric_name}_count_info\"", "all_metas", "[", "metric_name", "]", "=", "metas", "metas", "=", "{", "}", "train_ratio", "=", "train_count", "/", "original_count", "validate_ratio", "=", "validate_count", "/", "original_count", "test_ratio", "=", "test_count", "/", "original_count", "metas", "[", "\"train\"", "]", "=", "round", "(", "train_ratio", ",", "ROUND_NUM", ")", "metas", "[", "\"validate\"", "]", "=", "round", "(", "validate_ratio", ",", "ROUND_NUM", ")", "metas", "[", "\"test\"", "]", "=", "round", "(", "test_ratio", ",", "ROUND_NUM", ")", "metric_name", "=", "f\"{self.metric_name}_ratio_info\"", "all_metas", "[", "metric_name", "]", "=", "metas", "# stratified", "all_metas", "[", "\"stratified\"", "]", "=", "self", ".", "stratified", "return", "all_metas" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/model_selection/data_split/data_split.py#L189-L236
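A toy illustration of the dict shape the method fills in, assuming metric_name == "data_split", ROUND_NUM == 6 and stratified == False (all three values are assumptions):

id_train, id_validate, id_test = list(range(6)), list(range(2)), list(range(2))
original = len(id_train) + len(id_validate) + len(id_test)
all_metas = {
    "data_split_count_info": {"train": 6, "validate": 2, "test": 2, "original": original},
    "data_split_ratio_info": {"train": round(6 / original, 6),
                              "validate": round(2 / original, 6),
                              "test": round(2 / original, 6)},
    "stratified": False,
}
assert all_metas["data_split_ratio_info"] == {"train": 0.6, "validate": 0.2, "test": 0.2}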
open-mmlab/mmdetection3d
c7272063e818bcf33aebc498a017a95c8d065143
mmdet3d/models/losses/paconv_regularization_loss.py
python
paconv_regularization_loss
(modules, reduction)
return corr_loss
Computes correlation loss of PAConv weight kernels as regularization. Args: modules (List[nn.Module] | :obj:`generator`): A list or a python generator of torch.nn.Modules. reduction (str): Method to reduce losses among PAConv modules. The valid reduction method are none, sum or mean. Returns: torch.Tensor: Correlation loss of kernel weights.
Computes correlation loss of PAConv weight kernels as regularization.
[ "Computes", "correlation", "loss", "of", "PAConv", "weight", "kernels", "as", "regularization", "." ]
def paconv_regularization_loss(modules, reduction): """Computes correlation loss of PAConv weight kernels as regularization. Args: modules (List[nn.Module] | :obj:`generator`): A list or a python generator of torch.nn.Modules. reduction (str): Method to reduce losses among PAConv modules. The valid reduction method are none, sum or mean. Returns: torch.Tensor: Correlation loss of kernel weights. """ corr_loss = [] for module in modules: if isinstance(module, (PAConv, PAConvCUDA)): corr_loss.append(weight_correlation(module)) corr_loss = torch.stack(corr_loss) # perform reduction corr_loss = weight_reduce_loss(corr_loss, reduction=reduction) return corr_loss
[ "def", "paconv_regularization_loss", "(", "modules", ",", "reduction", ")", ":", "corr_loss", "=", "[", "]", "for", "module", "in", "modules", ":", "if", "isinstance", "(", "module", ",", "(", "PAConv", ",", "PAConvCUDA", ")", ")", ":", "corr_loss", ".", "append", "(", "weight_correlation", "(", "module", ")", ")", "corr_loss", "=", "torch", ".", "stack", "(", "corr_loss", ")", "# perform reduction", "corr_loss", "=", "weight_reduce_loss", "(", "corr_loss", ",", "reduction", "=", "reduction", ")", "return", "corr_loss" ]
https://github.com/open-mmlab/mmdetection3d/blob/c7272063e818bcf33aebc498a017a95c8d065143/mmdet3d/models/losses/paconv_regularization_loss.py#L47-L68
openstack/magnum
fa298eeab19b1d87070d72c7c4fb26cd75b0781e
magnum/common/context.py
python
make_admin_context
(show_deleted=False, all_tenants=False)
return context
Create an administrator context. :param show_deleted: if True, will show deleted items when querying the db
Create an administrator context.
[ "Create", "an", "administrator", "context", "." ]
def make_admin_context(show_deleted=False, all_tenants=False): """Create an administrator context. :param show_deleted: if True, will show deleted items when querying the db """ context = RequestContext(user_id=None, project=None, is_admin=True, show_deleted=show_deleted, all_tenants=all_tenants) return context
[ "def", "make_admin_context", "(", "show_deleted", "=", "False", ",", "all_tenants", "=", "False", ")", ":", "context", "=", "RequestContext", "(", "user_id", "=", "None", ",", "project", "=", "None", ",", "is_admin", "=", "True", ",", "show_deleted", "=", "show_deleted", ",", "all_tenants", "=", "all_tenants", ")", "return", "context" ]
https://github.com/openstack/magnum/blob/fa298eeab19b1d87070d72c7c4fb26cd75b0781e/magnum/common/context.py#L102-L112
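A hedged usage sketch: periodic tasks that must query the DB without a user context typically build one this way; the attribute check below assumes oslo.context's RequestContext behaviour.

ctx = make_admin_context(show_deleted=True, all_tenants=True)
assert ctx.is_admin  # flagged as admin, per the constructor call above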
larryhastings/gilectomy
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
Lib/zipfile.py
python
_ZipDecrypter.__call__
(self, c)
return c
Decrypt a single character.
Decrypt a single character.
[ "Decrypt", "a", "single", "character", "." ]
def __call__(self, c): """Decrypt a single character.""" assert isinstance(c, int) k = self.key2 | 2 c = c ^ (((k * (k^1)) >> 8) & 255) self._UpdateKeys(c) return c
[ "def", "__call__", "(", "self", ",", "c", ")", ":", "assert", "isinstance", "(", "c", ",", "int", ")", "k", "=", "self", ".", "key2", "|", "2", "c", "=", "c", "^", "(", "(", "(", "k", "*", "(", "k", "^", "1", ")", ")", ">>", "8", ")", "&", "255", ")", "self", ".", "_UpdateKeys", "(", "c", ")", "return", "c" ]
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/zipfile.py#L561-L567
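A standalone arithmetic check of the keystream step above. With key2 == 0 (an assumed cipher state, for illustration only), k = 0 | 2 = 2, so the keystream byte is ((2 * 3) >> 8) & 255 == 0 and the input byte passes through unchanged.

key2 = 0                     # assumed cipher state, not from the source
k = key2 | 2
assert ((k * (k ^ 1)) >> 8) & 255 == 0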
seantis/seantis-questionnaire
698c77b3d707635f50bcd86e7f1c94e94061b0f5
questionnaire/request_cache.py
python
get_request_cache
()
return _request_cache[currentThread()]
[]
def get_request_cache(): assert _installed_middleware, 'RequestCacheMiddleware not loaded' return _request_cache[currentThread()]
[ "def", "get_request_cache", "(", ")", ":", "assert", "_installed_middleware", ",", "'RequestCacheMiddleware not loaded'", "return", "_request_cache", "[", "currentThread", "(", ")", "]" ]
https://github.com/seantis/seantis-questionnaire/blob/698c77b3d707635f50bcd86e7f1c94e94061b0f5/questionnaire/request_cache.py#L16-L18
junyanz/pytorch-CycleGAN-and-pix2pix
003efc4c8819de47ff11b5a0af7ba09aee7f5fc1
models/networks.py
python
ResnetBlock.__init__
(self, dim, padding_type, norm_layer, use_dropout, use_bias)
Initialize the Resnet block A resnet block is a conv block with skip connections We construct a conv block with build_conv_block function, and implement skip connections in <forward> function. Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
Initialize the Resnet block
[ "Initialize", "the", "Resnet", "block" ]
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): """Initialize the Resnet block A resnet block is a conv block with skip connections We construct a conv block with build_conv_block function, and implement skip connections in <forward> function. Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf """ super(ResnetBlock, self).__init__() self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
[ "def", "__init__", "(", "self", ",", "dim", ",", "padding_type", ",", "norm_layer", ",", "use_dropout", ",", "use_bias", ")", ":", "super", "(", "ResnetBlock", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "conv_block", "=", "self", ".", "build_conv_block", "(", "dim", ",", "padding_type", ",", "norm_layer", ",", "use_dropout", ",", "use_bias", ")" ]
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/003efc4c8819de47ff11b5a0af7ba09aee7f5fc1/models/networks.py#L379-L388
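A hedged construction sketch for the block above; the dim and padding values mirror common CycleGAN settings but are illustrative, not prescribed by the source.

import functools
import torch.nn as nn

# assumes ResnetBlock from models/networks.py is in scope
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
block = ResnetBlock(dim=256, padding_type='reflect', norm_layer=norm_layer,
                    use_dropout=False, use_bias=True)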
internetarchive/brozzler
427908e8210fcbaab22ce8254bd8f8efb72d33c0
brozzler/cli.py
python
brozzler_worker
(argv=None)
Main entry point for brozzler, gets sites and pages to brozzle from rethinkdb, brozzles them.
Main entry point for brozzler, gets sites and pages to brozzle from rethinkdb, brozzles them.
[ "Main", "entry", "point", "for", "brozzler", "gets", "sites", "and", "pages", "to", "brozzle", "from", "rethinkdb", "brozzles", "them", "." ]
def brozzler_worker(argv=None): ''' Main entry point for brozzler, gets sites and pages to brozzle from rethinkdb, brozzles them. ''' argv = argv or sys.argv arg_parser = argparse.ArgumentParser( prog=os.path.basename(argv[0]), formatter_class=BetterArgumentDefaultsHelpFormatter) add_rethinkdb_options(arg_parser) arg_parser.add_argument( '-e', '--chrome-exe', dest='chrome_exe', default=suggest_default_chrome_exe(), help='executable to use to invoke chrome') arg_parser.add_argument( '-n', '--max-browsers', dest='max_browsers', default='1', help='max number of chrome instances simultaneously browsing pages') arg_parser.add_argument( '--proxy', dest='proxy', default=None, help='http proxy') arg_parser.add_argument( '--browser_throughput', type=int, dest='download_throughput', default=-1, help='Chrome DevTools downloadThroughput for Network.emulateNetworkConditions') arg_parser.add_argument( '--warcprox-auto', dest='warcprox_auto', action='store_true', help=( 'when needed, choose an available instance of warcprox from ' 'the rethinkdb service registry')) arg_parser.add_argument( '--skip-extract-outlinks', dest='skip_extract_outlinks', action='store_true', help=argparse.SUPPRESS) arg_parser.add_argument( '--skip-visit-hashtags', dest='skip_visit_hashtags', action='store_true', help=argparse.SUPPRESS) arg_parser.add_argument( '--skip-youtube-dl', dest='skip_youtube_dl', action='store_true', help=argparse.SUPPRESS) add_common_options(arg_parser, argv) args = arg_parser.parse_args(args=argv[1:]) configure_logging(args) brozzler.chrome.check_version(args.chrome_exe) def dump_state(signum, frame): signal.signal(signal.SIGQUIT, signal.SIG_IGN) try: state_strs = [] frames = sys._current_frames() threads = {th.ident: th for th in threading.enumerate()} for ident in frames: if threads[ident]: state_strs.append(str(threads[ident])) else: state_strs.append('<???:thread:ident=%s>' % ident) stack = traceback.format_stack(frames[ident]) state_strs.append(''.join(stack)) logging.info( 'dumping state (caught signal %s)\n%s' % ( signum, '\n'.join(state_strs))) except BaseException as e: logging.error('exception dumping state: %s' % e) finally: signal.signal(signal.SIGQUIT, dump_state) rr = rethinker(args) frontier = brozzler.RethinkDbFrontier(rr) service_registry = doublethink.ServiceRegistry(rr) worker = brozzler.worker.BrozzlerWorker( frontier, service_registry, max_browsers=int(args.max_browsers), chrome_exe=args.chrome_exe, proxy=args.proxy, warcprox_auto=args.warcprox_auto, skip_extract_outlinks=args.skip_extract_outlinks, skip_visit_hashtags=args.skip_visit_hashtags, skip_youtube_dl=args.skip_youtube_dl) signal.signal(signal.SIGQUIT, dump_state) signal.signal(signal.SIGTERM, lambda s,f: worker.stop()) signal.signal(signal.SIGINT, lambda s,f: worker.stop()) th = threading.Thread(target=worker.run, name='BrozzlerWorkerThread') th.start() th.join() logging.info('brozzler-worker is all done, exiting')
[ "def", "brozzler_worker", "(", "argv", "=", "None", ")", ":", "argv", "=", "argv", "or", "sys", ".", "argv", "arg_parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "os", ".", "path", ".", "basename", "(", "argv", "[", "0", "]", ")", ",", "formatter_class", "=", "BetterArgumentDefaultsHelpFormatter", ")", "add_rethinkdb_options", "(", "arg_parser", ")", "arg_parser", ".", "add_argument", "(", "'-e'", ",", "'--chrome-exe'", ",", "dest", "=", "'chrome_exe'", ",", "default", "=", "suggest_default_chrome_exe", "(", ")", ",", "help", "=", "'executable to use to invoke chrome'", ")", "arg_parser", ".", "add_argument", "(", "'-n'", ",", "'--max-browsers'", ",", "dest", "=", "'max_browsers'", ",", "default", "=", "'1'", ",", "help", "=", "'max number of chrome instances simultaneously browsing pages'", ")", "arg_parser", ".", "add_argument", "(", "'--proxy'", ",", "dest", "=", "'proxy'", ",", "default", "=", "None", ",", "help", "=", "'http proxy'", ")", "arg_parser", ".", "add_argument", "(", "'--browser_throughput'", ",", "type", "=", "int", ",", "dest", "=", "'download_throughput'", ",", "default", "=", "-", "1", ",", "help", "=", "'Chrome DevTools downloadThroughput for Network.emulateNetworkConditions'", ")", "arg_parser", ".", "add_argument", "(", "'--warcprox-auto'", ",", "dest", "=", "'warcprox_auto'", ",", "action", "=", "'store_true'", ",", "help", "=", "(", "'when needed, choose an available instance of warcprox from '", "'the rethinkdb service registry'", ")", ")", "arg_parser", ".", "add_argument", "(", "'--skip-extract-outlinks'", ",", "dest", "=", "'skip_extract_outlinks'", ",", "action", "=", "'store_true'", ",", "help", "=", "argparse", ".", "SUPPRESS", ")", "arg_parser", ".", "add_argument", "(", "'--skip-visit-hashtags'", ",", "dest", "=", "'skip_visit_hashtags'", ",", "action", "=", "'store_true'", ",", "help", "=", "argparse", ".", "SUPPRESS", ")", "arg_parser", ".", "add_argument", "(", "'--skip-youtube-dl'", ",", "dest", "=", "'skip_youtube_dl'", ",", "action", "=", "'store_true'", ",", "help", "=", "argparse", ".", "SUPPRESS", ")", "add_common_options", "(", "arg_parser", ",", "argv", ")", "args", "=", "arg_parser", ".", "parse_args", "(", "args", "=", "argv", "[", "1", ":", "]", ")", "configure_logging", "(", "args", ")", "brozzler", ".", "chrome", ".", "check_version", "(", "args", ".", "chrome_exe", ")", "def", "dump_state", "(", "signum", ",", "frame", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGQUIT", ",", "signal", ".", "SIG_IGN", ")", "try", ":", "state_strs", "=", "[", "]", "frames", "=", "sys", ".", "_current_frames", "(", ")", "threads", "=", "{", "th", ".", "ident", ":", "th", "for", "th", "in", "threading", ".", "enumerate", "(", ")", "}", "for", "ident", "in", "frames", ":", "if", "threads", "[", "ident", "]", ":", "state_strs", ".", "append", "(", "str", "(", "threads", "[", "ident", "]", ")", ")", "else", ":", "state_strs", ".", "append", "(", "'<???:thread:ident=%s>'", "%", "ident", ")", "stack", "=", "traceback", ".", "format_stack", "(", "frames", "[", "ident", "]", ")", "state_strs", ".", "append", "(", "''", ".", "join", "(", "stack", ")", ")", "logging", ".", "info", "(", "'dumping state (caught signal %s)\\n%s'", "%", "(", "signum", ",", "'\\n'", ".", "join", "(", "state_strs", ")", ")", ")", "except", "BaseException", "as", "e", ":", "logging", ".", "error", "(", "'exception dumping state: %s'", "%", "e", ")", "finally", ":", "signal", ".", "signal", "(", "signal", ".", "SIGQUIT", ",", "dump_state", ")", "rr", "=", 
"rethinker", "(", "args", ")", "frontier", "=", "brozzler", ".", "RethinkDbFrontier", "(", "rr", ")", "service_registry", "=", "doublethink", ".", "ServiceRegistry", "(", "rr", ")", "worker", "=", "brozzler", ".", "worker", ".", "BrozzlerWorker", "(", "frontier", ",", "service_registry", ",", "max_browsers", "=", "int", "(", "args", ".", "max_browsers", ")", ",", "chrome_exe", "=", "args", ".", "chrome_exe", ",", "proxy", "=", "args", ".", "proxy", ",", "warcprox_auto", "=", "args", ".", "warcprox_auto", ",", "skip_extract_outlinks", "=", "args", ".", "skip_extract_outlinks", ",", "skip_visit_hashtags", "=", "args", ".", "skip_visit_hashtags", ",", "skip_youtube_dl", "=", "args", ".", "skip_youtube_dl", ")", "signal", ".", "signal", "(", "signal", ".", "SIGQUIT", ",", "dump_state", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "lambda", "s", ",", "f", ":", "worker", ".", "stop", "(", ")", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "lambda", "s", ",", "f", ":", "worker", ".", "stop", "(", ")", ")", "th", "=", "threading", ".", "Thread", "(", "target", "=", "worker", ".", "run", ",", "name", "=", "'BrozzlerWorkerThread'", ")", "th", ".", "start", "(", ")", "th", ".", "join", "(", ")", "logging", ".", "info", "(", "'brozzler-worker is all done, exiting'", ")" ]
https://github.com/internetarchive/brozzler/blob/427908e8210fcbaab22ce8254bd8f8efb72d33c0/brozzler/cli.py#L303-L384
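Since the entry point accepts an explicit argv, it can also be driven programmatically; a sketch, with flag values that are illustrative (the '-n' and '--proxy' options appear in the parser above). Note this starts a long-running worker thread.

brozzler_worker(argv=['brozzler-worker', '-n', '2', '--proxy', 'localhost:8000'])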
VIDA-NYU/reprozip
67bbe8d2e22e0493ba0ccc78521729b49dd70a1d
reprozip/reprozip/traceutils.py
python
combine_traces
(traces, target)
Combines multiple trace databases into one. The runs from the original traces are appended ('run_id' field gets translated to avoid conflicts). :param traces: List of trace database filenames. :type traces: [Path] :param target: Directory where to write the new database and associated configuration file. :type target: Path
Combines multiple trace databases into one.
[ "Combines", "multiple", "trace", "databases", "into", "one", "." ]
def combine_traces(traces, target): """Combines multiple trace databases into one. The runs from the original traces are appended ('run_id' field gets translated to avoid conflicts). :param traces: List of trace database filenames. :type traces: [Path] :param target: Directory where to write the new database and associated configuration file. :type target: Path """ # We are probably overwriting one of the traces we're reading, so write to # a temporary file first then move it fd, output = Path.tempfile('.sqlite3', 'reprozip_combined_') if PY3: # On PY3, connect() only accepts unicode conn = sqlite3.connect(str(output)) else: conn = sqlite3.connect(output.path) os.close(fd) conn.row_factory = sqlite3.Row # Create the schema create_schema(conn) # Temporary database with lookup tables conn.execute( ''' ATTACH DATABASE '' AS maps; ''') conn.execute( ''' CREATE TABLE maps.map_runs( old INTEGER NOT NULL, new INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT ); ''') conn.execute( ''' CREATE TABLE maps.map_processes( old INTEGER NOT NULL, new INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT ); ''') # Do the merge for other in traces: logger.info("Attaching database %s", other) # Attach the other trace conn.execute( ''' ATTACH DATABASE ? AS trace; ''', (str(other),)) # Add runs to lookup table conn.execute( ''' INSERT INTO maps.map_runs(old) SELECT DISTINCT run_id AS old FROM trace.processes ORDER BY run_id; ''') logger.info( "%d rows in maps.map_runs", list(conn.execute('SELECT COUNT(*) FROM maps.map_runs;'))[0][0]) # Add processes to lookup table conn.execute( ''' INSERT INTO maps.map_processes(old) SELECT id AS old FROM trace.processes ORDER BY id; ''') logger.info( "%d rows in maps.map_processes", list(conn.execute('SELECT COUNT(*) FROM maps.map_processes;')) [0][0]) # processes logger.info("Insert processes...") conn.execute( ''' INSERT INTO processes(id, run_id, parent, timestamp, is_thread, exitcode) SELECT p.new AS id, r.new AS run_id, parent, timestamp, is_thread, exitcode FROM trace.processes t INNER JOIN maps.map_runs r ON t.run_id = r.old INNER JOIN maps.map_processes p ON t.id = p.old ORDER BY t.id; ''') # opened_files logger.info("Insert opened_files...") conn.execute( ''' INSERT INTO opened_files(run_id, name, timestamp, mode, is_directory, process) SELECT r.new AS run_id, name, timestamp, mode, is_directory, p.new AS process FROM trace.opened_files t INNER JOIN maps.map_runs r ON t.run_id = r.old INNER JOIN maps.map_processes p ON t.process = p.old ORDER BY t.id; ''') # executed_files logger.info("Insert executed_files...") conn.execute( ''' INSERT INTO executed_files(name, run_id, timestamp, process, argv, envp, workingdir) SELECT name, r.new AS run_id, timestamp, p.new AS process, argv, envp, workingdir FROM trace.executed_files t INNER JOIN maps.map_runs r ON t.run_id = r.old INNER JOIN maps.map_processes p ON t.process = p.old ORDER BY t.id; ''') # Flush maps conn.execute( ''' DELETE FROM maps.map_runs; ''') conn.execute( ''' DELETE FROM maps.map_processes; ''') # An implicit transaction gets created. Python used to implicitly # commit it, but no longer does as of 3.6, so we have to explicitly # commit before detaching. conn.commit() # Detach conn.execute( ''' DETACH DATABASE trace; ''') # See above. conn.commit() conn.execute( ''' DETACH DATABASE maps; ''') conn.commit() conn.close() # Move database to final destination if not target.exists(): target.mkdir() output.move(target / 'trace.sqlite3')
[ "def", "combine_traces", "(", "traces", ",", "target", ")", ":", "# We are probably overwriting one of the traces we're reading, so write to", "# a temporary file first then move it", "fd", ",", "output", "=", "Path", ".", "tempfile", "(", "'.sqlite3'", ",", "'reprozip_combined_'", ")", "if", "PY3", ":", "# On PY3, connect() only accepts unicode", "conn", "=", "sqlite3", ".", "connect", "(", "str", "(", "output", ")", ")", "else", ":", "conn", "=", "sqlite3", ".", "connect", "(", "output", ".", "path", ")", "os", ".", "close", "(", "fd", ")", "conn", ".", "row_factory", "=", "sqlite3", ".", "Row", "# Create the schema", "create_schema", "(", "conn", ")", "# Temporary database with lookup tables", "conn", ".", "execute", "(", "'''\n ATTACH DATABASE '' AS maps;\n '''", ")", "conn", ".", "execute", "(", "'''\n CREATE TABLE maps.map_runs(\n old INTEGER NOT NULL,\n new INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT\n );\n '''", ")", "conn", ".", "execute", "(", "'''\n CREATE TABLE maps.map_processes(\n old INTEGER NOT NULL,\n new INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT\n );\n '''", ")", "# Do the merge", "for", "other", "in", "traces", ":", "logger", ".", "info", "(", "\"Attaching database %s\"", ",", "other", ")", "# Attach the other trace", "conn", ".", "execute", "(", "'''\n ATTACH DATABASE ? AS trace;\n '''", ",", "(", "str", "(", "other", ")", ",", ")", ")", "# Add runs to lookup table", "conn", ".", "execute", "(", "'''\n INSERT INTO maps.map_runs(old)\n SELECT DISTINCT run_id AS old\n FROM trace.processes\n ORDER BY run_id;\n '''", ")", "logger", ".", "info", "(", "\"%d rows in maps.map_runs\"", ",", "list", "(", "conn", ".", "execute", "(", "'SELECT COUNT(*) FROM maps.map_runs;'", ")", ")", "[", "0", "]", "[", "0", "]", ")", "# Add processes to lookup table", "conn", ".", "execute", "(", "'''\n INSERT INTO maps.map_processes(old)\n SELECT id AS old\n FROM trace.processes\n ORDER BY id;\n '''", ")", "logger", ".", "info", "(", "\"%d rows in maps.map_processes\"", ",", "list", "(", "conn", ".", "execute", "(", "'SELECT COUNT(*) FROM maps.map_processes;'", ")", ")", "[", "0", "]", "[", "0", "]", ")", "# processes", "logger", ".", "info", "(", "\"Insert processes...\"", ")", "conn", ".", "execute", "(", "'''\n INSERT INTO processes(id, run_id, parent,\n timestamp, is_thread, exitcode)\n SELECT p.new AS id, r.new AS run_id, parent,\n timestamp, is_thread, exitcode\n FROM trace.processes t\n INNER JOIN maps.map_runs r ON t.run_id = r.old\n INNER JOIN maps.map_processes p ON t.id = p.old\n ORDER BY t.id;\n '''", ")", "# opened_files", "logger", ".", "info", "(", "\"Insert opened_files...\"", ")", "conn", ".", "execute", "(", "'''\n INSERT INTO opened_files(run_id, name, timestamp,\n mode, is_directory, process)\n SELECT r.new AS run_id, name, timestamp,\n mode, is_directory, p.new AS process\n FROM trace.opened_files t\n INNER JOIN maps.map_runs r ON t.run_id = r.old\n INNER JOIN maps.map_processes p ON t.process = p.old\n ORDER BY t.id;\n '''", ")", "# executed_files", "logger", ".", "info", "(", "\"Insert executed_files...\"", ")", "conn", ".", "execute", "(", "'''\n INSERT INTO executed_files(name, run_id, timestamp, process,\n argv, envp, workingdir)\n SELECT name, r.new AS run_id, timestamp, p.new AS process,\n argv, envp, workingdir\n FROM trace.executed_files t\n INNER JOIN maps.map_runs r ON t.run_id = r.old\n INNER JOIN maps.map_processes p ON t.process = p.old\n ORDER BY t.id;\n '''", ")", "# Flush maps", "conn", ".", "execute", "(", "'''\n DELETE FROM maps.map_runs;\n '''", ")", 
"conn", ".", "execute", "(", "'''\n DELETE FROM maps.map_processes;\n '''", ")", "# An implicit transaction gets created. Python used to implicitly", "# commit it, but no longer does as of 3.6, so we have to explicitly", "# commit before detaching.", "conn", ".", "commit", "(", ")", "# Detach", "conn", ".", "execute", "(", "'''\n DETACH DATABASE trace;\n '''", ")", "# See above.", "conn", ".", "commit", "(", ")", "conn", ".", "execute", "(", "'''\n DETACH DATABASE maps;\n '''", ")", "conn", ".", "commit", "(", ")", "conn", ".", "close", "(", ")", "# Move database to final destination", "if", "not", "target", ".", "exists", "(", ")", ":", "target", ".", "mkdir", "(", ")", "output", ".", "move", "(", "target", "/", "'trace.sqlite3'", ")" ]
https://github.com/VIDA-NYU/reprozip/blob/67bbe8d2e22e0493ba0ccc78521729b49dd70a1d/reprozip/reprozip/traceutils.py#L100-L261
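A hedged usage sketch; reprozip's Path type comes from the rpaths package (an assumption based on the Path.tempfile call above), and the file locations are placeholders.

from rpaths import Path

traces = [Path('/tmp/run1/trace.sqlite3'), Path('/tmp/run2/trace.sqlite3')]
combine_traces(traces, Path('/tmp/combined'))  # writes /tmp/combined/trace.sqlite3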
alephsecurity/abootool
4117b82d07e6b3a80eeab560d2140ae2dcfe2463
config.py
python
MetaConfig.__repr__
(cls)
return cls.get_config().__repr__()
[]
def __repr__(cls): return cls.get_config().__repr__()
[ "def", "__repr__", "(", "cls", ")", ":", "return", "cls", ".", "get_config", "(", ")", ".", "__repr__", "(", ")" ]
https://github.com/alephsecurity/abootool/blob/4117b82d07e6b3a80eeab560d2140ae2dcfe2463/config.py#L22-L23
SpamScope/mail-parser
87ad78f1f1028b509bce7fe684117619d84d0d40
mailparser/mailparser.py
python
MailParser.received_json
(self)
return json.dumps(self.received, ensure_ascii=False, indent=2)
Return a JSON of all received headers
Return a JSON of all received headers
[ "Return", "a", "JSON", "of", "all", "received", "headers" ]
def received_json(self): """ Return a JSON of all received headers """ return json.dumps(self.received, ensure_ascii=False, indent=2)
[ "def", "received_json", "(", "self", ")", ":", "return", "json", ".", "dumps", "(", "self", ".", "received", ",", "ensure_ascii", "=", "False", ",", "indent", "=", "2", ")" ]
https://github.com/SpamScope/mail-parser/blob/87ad78f1f1028b509bce7fe684117619d84d0d40/mailparser/mailparser.py#L571-L575
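A usage sketch; 'sample.eml' is a placeholder, and in the published library this accessor is exposed as a property, so it is read without parentheses (an assumption worth checking against the installed version).

import mailparser

mail = mailparser.parse_from_file('sample.eml')
print(mail.received_json)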
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/inspect.py
python
getcoroutinestate
(coroutine)
return CORO_SUSPENDED
Get current state of a coroutine object. Possible states are: CORO_CREATED: Waiting to start execution. CORO_RUNNING: Currently being executed by the interpreter. CORO_SUSPENDED: Currently suspended at an await expression. CORO_CLOSED: Execution has completed.
Get current state of a coroutine object.
[ "Get", "current", "state", "of", "a", "coroutine", "object", "." ]
def getcoroutinestate(coroutine): """Get current state of a coroutine object. Possible states are: CORO_CREATED: Waiting to start execution. CORO_RUNNING: Currently being executed by the interpreter. CORO_SUSPENDED: Currently suspended at an await expression. CORO_CLOSED: Execution has completed. """ if coroutine.cr_running: return CORO_RUNNING if coroutine.cr_frame is None: return CORO_CLOSED if coroutine.cr_frame.f_lasti == -1: return CORO_CREATED return CORO_SUSPENDED
[ "def", "getcoroutinestate", "(", "coroutine", ")", ":", "if", "coroutine", ".", "cr_running", ":", "return", "CORO_RUNNING", "if", "coroutine", ".", "cr_frame", "is", "None", ":", "return", "CORO_CLOSED", "if", "coroutine", ".", "cr_frame", ".", "f_lasti", "==", "-", "1", ":", "return", "CORO_CREATED", "return", "CORO_SUSPENDED" ]
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/inspect.py#L1661-L1676
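A self-contained demonstration of two of the documented states, using the public inspect API.

import inspect

async def job():
    return 42

c = job()
assert inspect.getcoroutinestate(c) == inspect.CORO_CREATED
c.close()  # never awaited, so close it to avoid a RuntimeWarning
assert inspect.getcoroutinestate(c) == inspect.CORO_CLOSED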
atlassian-api/atlassian-python-api
6d8545a790c3aae10b75bdc225fb5c3a0aee44db
atlassian/service_desk.py
python
ServiceDesk.create_organization
(self, name)
return self.post(url, headers=self.experimental_headers, data=data)
To create an organization, Jira administrator global or agent permission is required, depending on the settings :param name: str :return: Organization data
To create an organization, Jira administrator global or agent permission is required, depending on the settings
[ "To", "create", "an", "organization", ",", "Jira", "administrator", "global", "or", "agent", "permission", "is", "required", ",", "depending", "on", "the", "settings" ]
def create_organization(self, name): """ To create an organization, Jira administrator global or agent permission is required, depending on the settings :param name: str :return: Organization data """ log.warning("Creating organization...") url = "rest/servicedeskapi/organization" data = {"name": name} return self.post(url, headers=self.experimental_headers, data=data)
[ "def", "create_organization", "(", "self", ",", "name", ")", ":", "log", ".", "warning", "(", "\"Creating organization...\"", ")", "url", "=", "\"rest/servicedeskapi/organization\"", "data", "=", "{", "\"name\"", ":", "name", "}", "return", "self", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "data", "=", "data", ")" ]
https://github.com/atlassian-api/atlassian-python-api/blob/6d8545a790c3aae10b75bdc225fb5c3a0aee44db/atlassian/service_desk.py#L326-L338
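A hedged usage sketch; the URL, credentials, and organization name are placeholders.

from atlassian import ServiceDesk

sd = ServiceDesk(url='https://jira.example.com', username='admin', password='secret')
org = sd.create_organization('Acme Support')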
qxf2/qxf2-page-object-model
8b2978e5bd2a5413d581f9e29a2098e5f5e742fd
page_objects/Mobile_Base_Page.py
python
Mobile_Base_Page.get_failure_message_list
(self)
return self.failure_message_list
Return the failure message list
Return the failure message list
[ "Return", "the", "failure", "message", "list" ]
def get_failure_message_list(self): "Return the failure message list" return self.failure_message_list
[ "def", "get_failure_message_list", "(", "self", ")", ":", "return", "self", ".", "failure_message_list" ]
https://github.com/qxf2/qxf2-page-object-model/blob/8b2978e5bd2a5413d581f9e29a2098e5f5e742fd/page_objects/Mobile_Base_Page.py#L64-L66
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
common/lib/xmodule/xmodule/x_module.py
python
ModuleSystemShim.user_is_staff
(self)
return None
Returns whether the current user has staff access to the course. Deprecated in favor of the user service.
Returns whether the current user has staff access to the course.
[ "Returns", "whether", "the", "current", "user", "has", "staff", "access", "to", "the", "course", "." ]
def user_is_staff(self): """ Returns whether the current user has staff access to the course. Deprecated in favor of the user service. """ warnings.warn( 'runtime.user_is_staff is deprecated. Please use the user service instead.', DeprecationWarning, stacklevel=3, ) user_service = self._services.get('user') if user_service: return self._services['user'].get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_STAFF) return None
[ "def", "user_is_staff", "(", "self", ")", ":", "warnings", ".", "warn", "(", "'runtime.user_is_staff is deprecated. Please use the user service instead.'", ",", "DeprecationWarning", ",", "stacklevel", "=", "3", ",", ")", "user_service", "=", "self", ".", "_services", ".", "get", "(", "'user'", ")", "if", "user_service", ":", "return", "self", ".", "_services", "[", "'user'", "]", ".", "get_current_user", "(", ")", ".", "opt_attrs", ".", "get", "(", "ATTR_KEY_USER_IS_STAFF", ")", "return", "None" ]
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/common/lib/xmodule/xmodule/x_module.py#L1799-L1812
cszn/KAIR
72e93351bca41d1b1f6a4c3e1957f5bffccc7101
utils/utils_image.py
python
imssave
(imgs, img_path)
imgs: list, N images of size WxHxC
imgs: list, N images of size WxHxC
[ "imgs", ":", "list", "N", "images", "of", "size", "WxHxC" ]
def imssave(imgs, img_path): """ imgs: list, N images of size WxHxC """ img_name, ext = os.path.splitext(os.path.basename(img_path)) for i, img in enumerate(imgs): if img.ndim == 3: img = img[:, :, [2, 1, 0]] new_path = os.path.join(os.path.dirname(img_path), img_name+str('_{:04d}'.format(i))+'.png') cv2.imwrite(new_path, img)
[ "def", "imssave", "(", "imgs", ",", "img_path", ")", ":", "img_name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "img_path", ")", ")", "for", "i", ",", "img", "in", "enumerate", "(", "imgs", ")", ":", "if", "img", ".", "ndim", "==", "3", ":", "img", "=", "img", "[", ":", ",", ":", ",", "[", "2", ",", "1", ",", "0", "]", "]", "new_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "img_path", ")", ",", "img_name", "+", "str", "(", "'_{:04d}'", ".", "format", "(", "i", ")", ")", "+", "'.png'", ")", "cv2", ".", "imwrite", "(", "new_path", ",", "img", ")" ]
https://github.com/cszn/KAIR/blob/72e93351bca41d1b1f6a4c3e1957f5bffccc7101/utils/utils_image.py#L116-L125
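An illustrative call with dummy patches, assuming imssave is in scope and the target directory already exists.

import numpy as np

patches = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(3)]
imssave(patches, '/tmp/out/patch.png')  # writes patch_0000.png .. patch_0002.png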
tav/pylibs
3c16b843681f54130ee6a022275289cadb2f2a69
demjson.py
python
JSON.skipws
(self, txt, i=0, imax=None, skip_comments=True)
Skips whitespace.
Skips whitespace.
[ "Skips", "whitespace", "." ]
def skipws(self, txt, i=0, imax=None, skip_comments=True): """Skips whitespace. """ if not self._allow_comments and not self._allow_unicode_whitespace: if imax is None: imax = len(txt) while i < imax and txt[i] in ' \r\n\t': i += 1 return i else: return self.skipws_any(txt, i, imax, skip_comments)
[ "def", "skipws", "(", "self", ",", "txt", ",", "i", "=", "0", ",", "imax", "=", "None", ",", "skip_comments", "=", "True", ")", ":", "if", "not", "self", ".", "_allow_comments", "and", "not", "self", ".", "_allow_unicode_whitespace", ":", "if", "imax", "is", "None", ":", "imax", "=", "len", "(", "txt", ")", "while", "i", "<", "imax", "and", "txt", "[", "i", "]", "in", "' \\r\\n\\t'", ":", "i", "+=", "1", "return", "i", "else", ":", "return", "self", ".", "skipws_any", "(", "txt", ",", "i", ",", "imax", ",", "skip_comments", ")" ]
https://github.com/tav/pylibs/blob/3c16b843681f54130ee6a022275289cadb2f2a69/demjson.py#L1499-L1509
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/nbconvert/preprocessors/highlightmagics.py
python
HighlightMagicsPreprocessor.which_magic_language
(self, source)
When a cell uses another language through a magic extension, the other language is returned. If no language magic is detected, this function returns None. Parameters ---------- source: str Source code of the cell to highlight
When a cell uses another language through a magic extension, the other language is returned. If no language magic is detected, this function returns None.
[ "When", "a", "cell", "uses", "another", "language", "through", "a", "magic", "extension", "the", "other", "language", "is", "returned", ".", "If", "no", "language", "magic", "is", "detected", "this", "function", "returns", "None", "." ]
def which_magic_language(self, source): """ When a cell uses another language through a magic extension, the other language is returned. If no language magic is detected, this function returns None. Parameters ---------- source: str Source code of the cell to highlight """ m = self.re_magic_language.match(source) if m: # By construction of the re, the matched language must be in the # languages dictionary return self.default_languages[m.group(1)] else: return None
[ "def", "which_magic_language", "(", "self", ",", "source", ")", ":", "m", "=", "self", ".", "re_magic_language", ".", "match", "(", "source", ")", "if", "m", ":", "# By construction of the re, the matched language must be in the", "# languages dictionary", "return", "self", ".", "default_languages", "[", "m", ".", "group", "(", "1", ")", "]", "else", ":", "return", "None" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/nbconvert/preprocessors/highlightmagics.py#L58-L77
jakewaldron/PlexEmail
8e560407a87f58e02d1f1e8048dacb9be6e35c03
scripts/cloudinary/utils.py
python
cloudinary_url
(source, **options)
return (source, options)
[]
def cloudinary_url(source, **options): original_source = source type = options.pop("type", "upload") if type == 'fetch': options["fetch_format"] = options.get("fetch_format", options.pop("format", None)) transformation, options = generate_transformation_string(**options) resource_type = options.pop("resource_type", "image") version = options.pop("version", None) format = options.pop("format", None) cdn_subdomain = options.pop("cdn_subdomain", cloudinary.config().cdn_subdomain) secure_cdn_subdomain = options.pop("secure_cdn_subdomain", cloudinary.config().secure_cdn_subdomain) cname = options.pop("cname", cloudinary.config().cname) shorten = options.pop("shorten", cloudinary.config().shorten) cloud_name = options.pop("cloud_name", cloudinary.config().cloud_name or None) if cloud_name is None: raise ValueError("Must supply cloud_name in tag or in configuration") secure = options.pop("secure", cloudinary.config().secure) private_cdn = options.pop("private_cdn", cloudinary.config().private_cdn) secure_distribution = options.pop("secure_distribution", cloudinary.config().secure_distribution) sign_url = options.pop("sign_url", cloudinary.config().sign_url) api_secret = options.pop("api_secret", cloudinary.config().api_secret) url_suffix = options.pop("url_suffix", None) use_root_path = options.pop("use_root_path", cloudinary.config().use_root_path) if url_suffix and not private_cdn: raise ValueError("URL Suffix only supported in private CDN") if (not source) or type == "upload" and re.match(r'^https?:', source): return (original_source, options) resource_type, type = finalize_resource_type(resource_type, type, url_suffix, use_root_path, shorten) source, source_to_sign = finalize_source(source, format, url_suffix) if source_to_sign.find("/") >= 0 and not re.match(r'^https?:/', source_to_sign) and not re.match(r'^v[0-9]+', source_to_sign) and not version: version = "1" if version: version = "v" + str(version) transformation = re.sub(r'([^:])/+', r'\1/', transformation) signature = None if sign_url: to_sign = "/".join(__compact([transformation, source_to_sign])) signature = "s--" + to_string(base64.urlsafe_b64encode( hashlib.sha1(to_bytes(to_sign + api_secret)).digest() )[0:8]) + "--" prefix = unsigned_download_url_prefix(source, cloud_name, private_cdn, cdn_subdomain, secure_cdn_subdomain, cname, secure, secure_distribution) source = "/".join(__compact([prefix, resource_type, type, signature, transformation, version, source])) return (source, options)
[ "def", "cloudinary_url", "(", "source", ",", "*", "*", "options", ")", ":", "original_source", "=", "source", "type", "=", "options", ".", "pop", "(", "\"type\"", ",", "\"upload\"", ")", "if", "type", "==", "'fetch'", ":", "options", "[", "\"fetch_format\"", "]", "=", "options", ".", "get", "(", "\"fetch_format\"", ",", "options", ".", "pop", "(", "\"format\"", ",", "None", ")", ")", "transformation", ",", "options", "=", "generate_transformation_string", "(", "*", "*", "options", ")", "resource_type", "=", "options", ".", "pop", "(", "\"resource_type\"", ",", "\"image\"", ")", "version", "=", "options", ".", "pop", "(", "\"version\"", ",", "None", ")", "format", "=", "options", ".", "pop", "(", "\"format\"", ",", "None", ")", "cdn_subdomain", "=", "options", ".", "pop", "(", "\"cdn_subdomain\"", ",", "cloudinary", ".", "config", "(", ")", ".", "cdn_subdomain", ")", "secure_cdn_subdomain", "=", "options", ".", "pop", "(", "\"secure_cdn_subdomain\"", ",", "cloudinary", ".", "config", "(", ")", ".", "secure_cdn_subdomain", ")", "cname", "=", "options", ".", "pop", "(", "\"cname\"", ",", "cloudinary", ".", "config", "(", ")", ".", "cname", ")", "shorten", "=", "options", ".", "pop", "(", "\"shorten\"", ",", "cloudinary", ".", "config", "(", ")", ".", "shorten", ")", "cloud_name", "=", "options", ".", "pop", "(", "\"cloud_name\"", ",", "cloudinary", ".", "config", "(", ")", ".", "cloud_name", "or", "None", ")", "if", "cloud_name", "is", "None", ":", "raise", "ValueError", "(", "\"Must supply cloud_name in tag or in configuration\"", ")", "secure", "=", "options", ".", "pop", "(", "\"secure\"", ",", "cloudinary", ".", "config", "(", ")", ".", "secure", ")", "private_cdn", "=", "options", ".", "pop", "(", "\"private_cdn\"", ",", "cloudinary", ".", "config", "(", ")", ".", "private_cdn", ")", "secure_distribution", "=", "options", ".", "pop", "(", "\"secure_distribution\"", ",", "cloudinary", ".", "config", "(", ")", ".", "secure_distribution", ")", "sign_url", "=", "options", ".", "pop", "(", "\"sign_url\"", ",", "cloudinary", ".", "config", "(", ")", ".", "sign_url", ")", "api_secret", "=", "options", ".", "pop", "(", "\"api_secret\"", ",", "cloudinary", ".", "config", "(", ")", ".", "api_secret", ")", "url_suffix", "=", "options", ".", "pop", "(", "\"url_suffix\"", ",", "None", ")", "use_root_path", "=", "options", ".", "pop", "(", "\"use_root_path\"", ",", "cloudinary", ".", "config", "(", ")", ".", "use_root_path", ")", "if", "url_suffix", "and", "not", "private_cdn", ":", "raise", "ValueError", "(", "\"URL Suffix only supported in private CDN\"", ")", "if", "(", "not", "source", ")", "or", "type", "==", "\"upload\"", "and", "re", ".", "match", "(", "r'^https?:'", ",", "source", ")", ":", "return", "(", "original_source", ",", "options", ")", "resource_type", ",", "type", "=", "finalize_resource_type", "(", "resource_type", ",", "type", ",", "url_suffix", ",", "use_root_path", ",", "shorten", ")", "source", ",", "source_to_sign", "=", "finalize_source", "(", "source", ",", "format", ",", "url_suffix", ")", "if", "source_to_sign", ".", "find", "(", "\"/\"", ")", ">=", "0", "and", "not", "re", ".", "match", "(", "r'^https?:/'", ",", "source_to_sign", ")", "and", "not", "re", ".", "match", "(", "r'^v[0-9]+'", ",", "source_to_sign", ")", "and", "not", "version", ":", "version", "=", "\"1\"", "if", "version", ":", "version", "=", "\"v\"", "+", "str", "(", "version", ")", "transformation", "=", "re", ".", "sub", "(", "r'([^:])/+'", ",", "r'\\1/'", ",", "transformation", ")", "signature", "=", 
"None", "if", "sign_url", ":", "to_sign", "=", "\"/\"", ".", "join", "(", "__compact", "(", "[", "transformation", ",", "source_to_sign", "]", ")", ")", "signature", "=", "\"s--\"", "+", "to_string", "(", "base64", ".", "urlsafe_b64encode", "(", "hashlib", ".", "sha1", "(", "to_bytes", "(", "to_sign", "+", "api_secret", ")", ")", ".", "digest", "(", ")", ")", "[", "0", ":", "8", "]", ")", "+", "\"--\"", "prefix", "=", "unsigned_download_url_prefix", "(", "source", ",", "cloud_name", ",", "private_cdn", ",", "cdn_subdomain", ",", "secure_cdn_subdomain", ",", "cname", ",", "secure", ",", "secure_distribution", ")", "source", "=", "\"/\"", ".", "join", "(", "__compact", "(", "[", "prefix", ",", "resource_type", ",", "type", ",", "signature", ",", "transformation", ",", "version", ",", "source", "]", ")", ")", "return", "(", "source", ",", "options", ")" ]
https://github.com/jakewaldron/PlexEmail/blob/8e560407a87f58e02d1f1e8048dacb9be6e35c03/scripts/cloudinary/utils.py#L202-L253
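A hedged usage sketch, assuming the vendored package is importable as `cloudinary`; the cloud name and asset are placeholders.

import cloudinary
from cloudinary.utils import cloudinary_url

cloudinary.config(cloud_name='demo')
url, options = cloudinary_url('sample.jpg', width=100, height=100, crop='fill')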
RhetTbull/osxphotos
231d13279296ee4a242d3140d8abe7b5a5bcc9c0
osxphotos/utils.py
python
_open_sql_file
(dbname)
return (conn, c)
opens sqlite file dbname in read-only mode returns tuple of (connection, cursor)
opens sqlite file dbname in read-only mode returns tuple of (connection, cursor)
[ "opens", "sqlite", "file", "dbname", "in", "read", "-", "only", "mode", "returns", "tuple", "of", "(", "connection", "cursor", ")" ]
def _open_sql_file(dbname): """opens sqlite file dbname in read-only mode returns tuple of (connection, cursor)""" try: dbpath = pathlib.Path(dbname).resolve() conn = sqlite3.connect(f"{dbpath.as_uri()}?mode=ro", timeout=1, uri=True) c = conn.cursor() except sqlite3.Error as e: sys.exit(f"An error occurred opening sqlite file: {e.args[0]} {dbname}") return (conn, c)
[ "def", "_open_sql_file", "(", "dbname", ")", ":", "try", ":", "dbpath", "=", "pathlib", ".", "Path", "(", "dbname", ")", ".", "resolve", "(", ")", "conn", "=", "sqlite3", ".", "connect", "(", "f\"{dbpath.as_uri()}?mode=ro\"", ",", "timeout", "=", "1", ",", "uri", "=", "True", ")", "c", "=", "conn", ".", "cursor", "(", ")", "except", "sqlite3", ".", "Error", "as", "e", ":", "sys", ".", "exit", "(", "f\"An error occurred opening sqlite file: {e.args[0]} {dbname}\"", ")", "return", "(", "conn", ",", "c", ")" ]
https://github.com/RhetTbull/osxphotos/blob/231d13279296ee4a242d3140d8abe7b5a5bcc9c0/osxphotos/utils.py#L290-L299
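A usage sketch, assuming the private helper above is in scope; the database path is a placeholder. Because the connection is opened read-only, any write attempt would raise.

conn, cursor = _open_sql_file('/path/to/Photos.sqlite')
cursor.execute('SELECT count(*) FROM sqlite_master')
print(cursor.fetchone()[0])
conn.close()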
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/charset_normalizer/md.py
python
SuperWeirdWordPlugin.feed
(self, character: str)
[]
def feed(self, character: str) -> None: if character.isalpha(): self._buffer = "".join([self._buffer, character]) if is_accentuated(character): self._buffer_accent_count += 1 if ( self._foreign_long_watch is False and (is_latin(character) is False or is_accentuated(character)) and is_cjk(character) is False and is_hangul(character) is False and is_katakana(character) is False and is_hiragana(character) is False and is_thai(character) is False ): self._foreign_long_watch = True return if not self._buffer: return if ( character.isspace() or is_punctuation(character) or is_separator(character) ) and self._buffer: self._word_count += 1 buffer_length = len(self._buffer) # type: int self._character_count += buffer_length if buffer_length >= 4: if self._buffer_accent_count / buffer_length > 0.34: self._is_current_word_bad = True # Words/buffers ending with an upper case accentuated letter are so rare # that we will consider them all as suspicious. Same weight as foreign_long suspicious. if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper(): self._foreign_long_count += 1 self._is_current_word_bad = True if buffer_length >= 24 and self._foreign_long_watch: self._foreign_long_count += 1 self._is_current_word_bad = True if self._is_current_word_bad: self._bad_word_count += 1 self._bad_character_count += len(self._buffer) self._is_current_word_bad = False self._foreign_long_watch = False self._buffer = "" self._buffer_accent_count = 0 elif ( character not in {"<", ">", "-", "="} and character.isdigit() is False and is_symbol(character) ): self._is_current_word_bad = True self._buffer += character
[ "def", "feed", "(", "self", ",", "character", ":", "str", ")", "->", "None", ":", "if", "character", ".", "isalpha", "(", ")", ":", "self", ".", "_buffer", "=", "\"\"", ".", "join", "(", "[", "self", ".", "_buffer", ",", "character", "]", ")", "if", "is_accentuated", "(", "character", ")", ":", "self", ".", "_buffer_accent_count", "+=", "1", "if", "(", "self", ".", "_foreign_long_watch", "is", "False", "and", "(", "is_latin", "(", "character", ")", "is", "False", "or", "is_accentuated", "(", "character", ")", ")", "and", "is_cjk", "(", "character", ")", "is", "False", "and", "is_hangul", "(", "character", ")", "is", "False", "and", "is_katakana", "(", "character", ")", "is", "False", "and", "is_hiragana", "(", "character", ")", "is", "False", "and", "is_thai", "(", "character", ")", "is", "False", ")", ":", "self", ".", "_foreign_long_watch", "=", "True", "return", "if", "not", "self", ".", "_buffer", ":", "return", "if", "(", "character", ".", "isspace", "(", ")", "or", "is_punctuation", "(", "character", ")", "or", "is_separator", "(", "character", ")", ")", "and", "self", ".", "_buffer", ":", "self", ".", "_word_count", "+=", "1", "buffer_length", "=", "len", "(", "self", ".", "_buffer", ")", "# type: int", "self", ".", "_character_count", "+=", "buffer_length", "if", "buffer_length", ">=", "4", ":", "if", "self", ".", "_buffer_accent_count", "/", "buffer_length", ">", "0.34", ":", "self", ".", "_is_current_word_bad", "=", "True", "# Word/Buffer ending with a upper case accentuated letter are so rare,", "# that we will consider them all as suspicious. Same weight as foreign_long suspicious.", "if", "is_accentuated", "(", "self", ".", "_buffer", "[", "-", "1", "]", ")", "and", "self", ".", "_buffer", "[", "-", "1", "]", ".", "isupper", "(", ")", ":", "self", ".", "_foreign_long_count", "+=", "1", "self", ".", "_is_current_word_bad", "=", "True", "if", "buffer_length", ">=", "24", "and", "self", ".", "_foreign_long_watch", ":", "self", ".", "_foreign_long_count", "+=", "1", "self", ".", "_is_current_word_bad", "=", "True", "if", "self", ".", "_is_current_word_bad", ":", "self", ".", "_bad_word_count", "+=", "1", "self", ".", "_bad_character_count", "+=", "len", "(", "self", ".", "_buffer", ")", "self", ".", "_is_current_word_bad", "=", "False", "self", ".", "_foreign_long_watch", "=", "False", "self", ".", "_buffer", "=", "\"\"", "self", ".", "_buffer_accent_count", "=", "0", "elif", "(", "character", "not", "in", "{", "\"<\"", ",", "\">\"", ",", "\"-\"", ",", "\"=\"", "}", "and", "character", ".", "isdigit", "(", ")", "is", "False", "and", "is_symbol", "(", "character", ")", ")", ":", "self", ".", "_is_current_word_bad", "=", "True", "self", ".", "_buffer", "+=", "character" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/charset_normalizer/md.py#L270-L322
DIGITALCRIMINAL/OnlyFans
d1a5f7112eb95145fbfa9a3c58e3a49d69cbd10a
database/archived_databases/posts/alembic/env.py
python
run_migrations_online
()
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
Run migrations in 'online' mode.
[ "Run", "migrations", "in", "online", "mode", "." ]
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, ) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, render_as_batch=True, compare_type=True, ) with context.begin_transaction(): context.run_migrations()
[ "def", "run_migrations_online", "(", ")", ":", "connectable", "=", "engine_from_config", "(", "config", ".", "get_section", "(", "config", ".", "config_ini_section", ")", ",", "prefix", "=", "\"sqlalchemy.\"", ",", "poolclass", "=", "pool", ".", "NullPool", ",", ")", "with", "connectable", ".", "connect", "(", ")", "as", "connection", ":", "context", ".", "configure", "(", "connection", "=", "connection", ",", "target_metadata", "=", "target_metadata", ",", "render_as_batch", "=", "True", ",", "compare_type", "=", "True", ",", ")", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", ")" ]
https://github.com/DIGITALCRIMINAL/OnlyFans/blob/d1a5f7112eb95145fbfa9a3c58e3a49d69cbd10a/database/archived_databases/posts/alembic/env.py#L54-L75
sth2018/FastWordQuery
901ebe8fd5989d8861d20ee3fec3acd15b7f46a5
addons/fastwq/service/base.py
python
MdxService.get_html
(self)
return self.html_cache[self.word]
get self.word's html page from MDX
get self.word's html page from MDX
[ "get", "self", ".", "word", "s", "html", "page", "from", "MDX" ]
def get_html(self): """get self.word's html page from MDX""" if not self.html_cache[self.word]: html = self._get_definition_mdx() if html: self.html_cache[self.word] = html return self.html_cache[self.word]
[ "def", "get_html", "(", "self", ")", ":", "if", "not", "self", ".", "html_cache", "[", "self", ".", "word", "]", ":", "html", "=", "self", ".", "_get_definition_mdx", "(", ")", "if", "html", ":", "self", ".", "html_cache", "[", "self", ".", "word", "]", "=", "html", "return", "self", ".", "html_cache", "[", "self", ".", "word", "]" ]
https://github.com/sth2018/FastWordQuery/blob/901ebe8fd5989d8861d20ee3fec3acd15b7f46a5/addons/fastwq/service/base.py#L598-L604
007gzs/dingtalk-sdk
7979da2e259fdbc571728cae2425a04dbc65850a
dingtalk/client/api/taobao.py
python
TbYeWuPingTaiXinLingShou.taobao_uscesl_biz_esl_unbind
( self, esl_bar_code, store_id, biz_brand_key )
return self._top_request( "taobao.uscesl.biz.esl.unbind", { "esl_bar_code": esl_bar_code, "store_id": store_id, "biz_brand_key": biz_brand_key } )
Electronic shelf label unbind interface. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=39107 :param esl_bar_code: shelf label barcode :param store_id: storeId of the store registered in the shelf label system :param biz_brand_key: merchant identifier key
Electronic shelf label unbind interface. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=39107
[ "Electronic", "shelf", "label", "unbind", "interface", ".", "Documentation", ":", "https", ":", "//", "open", "-", "doc", ".", "dingtalk", ".", "com", "/", "docs", "/", "api", ".", "htm?apiId", "=", "39107" ]
def taobao_uscesl_biz_esl_unbind( self, esl_bar_code, store_id, biz_brand_key ): """ Electronic shelf label unbind interface. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=39107 :param esl_bar_code: shelf label barcode :param store_id: storeId of the store registered in the shelf label system :param biz_brand_key: merchant identifier key """ return self._top_request( "taobao.uscesl.biz.esl.unbind", { "esl_bar_code": esl_bar_code, "store_id": store_id, "biz_brand_key": biz_brand_key } )
[ "def", "taobao_uscesl_biz_esl_unbind", "(", "self", ",", "esl_bar_code", ",", "store_id", ",", "biz_brand_key", ")", ":", "return", "self", ".", "_top_request", "(", "\"taobao.uscesl.biz.esl.unbind\"", ",", "{", "\"esl_bar_code\"", ":", "esl_bar_code", ",", "\"store_id\"", ":", "store_id", ",", "\"biz_brand_key\"", ":", "biz_brand_key", "}", ")" ]
https://github.com/007gzs/dingtalk-sdk/blob/7979da2e259fdbc571728cae2425a04dbc65850a/dingtalk/client/api/taobao.py#L102325-L102346
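A hedged call sketch, assuming a configured TOP client object that mixes in the class above; every value below is a placeholder.

resp = client.taobao_uscesl_biz_esl_unbind(
    esl_bar_code='6901234567890', store_id='ST001', biz_brand_key='brandKey')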
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/translations/integrations/transifex/transifex.py
python
Transifex._send_files_to_transifex
(self, generated_files, app_trans_generator)
return file_uploads
[]
def _send_files_to_transifex(self, generated_files, app_trans_generator): file_uploads = {} for resource_slug, path_to_file in generated_files: resource_name = self._resource_name_in_project_lang(resource_slug, app_trans_generator) if self.is_source_file: response = self.client.upload_resource( path_to_file, resource_slug, resource_name, self.update_resource ) else: response = self.client.upload_translation( path_to_file, resource_slug, resource_name, self.source_lang ) if response.status_code in [200, 201]: file_uploads[resource_name] = _("Successfully Uploaded") else: file_uploads[resource_name] = "{}: {}".format(response.status_code, response.content) return file_uploads
[ "def", "_send_files_to_transifex", "(", "self", ",", "generated_files", ",", "app_trans_generator", ")", ":", "file_uploads", "=", "{", "}", "for", "resource_slug", ",", "path_to_file", "in", "generated_files", ":", "resource_name", "=", "self", ".", "_resource_name_in_project_lang", "(", "resource_slug", ",", "app_trans_generator", ")", "if", "self", ".", "is_source_file", ":", "response", "=", "self", ".", "client", ".", "upload_resource", "(", "path_to_file", ",", "resource_slug", ",", "resource_name", ",", "self", ".", "update_resource", ")", "else", ":", "response", "=", "self", ".", "client", ".", "upload_translation", "(", "path_to_file", ",", "resource_slug", ",", "resource_name", ",", "self", ".", "source_lang", ")", "if", "response", ".", "status_code", "in", "[", "200", ",", "201", "]", ":", "file_uploads", "[", "resource_name", "]", "=", "_", "(", "\"Successfully Uploaded\"", ")", "else", ":", "file_uploads", "[", "resource_name", "]", "=", "\"{}: {}\"", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "content", ")", "return", "file_uploads" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/translations/integrations/transifex/transifex.py#L100-L120
missionpinball/mpf
8e6b74cff4ba06d2fec9445742559c1068b88582
mpf/wire/base.py
python
System.daisy_chain_dict
(self, items: Dict[int, T], get_in: Callable[[T], Pin], get_out: Callable[[T], Pin], start: int, ladder: bool)
return True
Like daisy_chain_list but takes a dict and checks it for sequentiality in the process of daisy chaining. Used for directly daisy chaining elements with specified numbers. :param items: The dictionary from numbers to items, of any type. :param get_in: Function to apply to an item to get the input pin. :param get_out: Function to apply to an item to get the output pin. :param start: Number to start accessing the dictionary from. :param ladder: If true, alternate chain connections are flipped to create a vertical ladder in wireviz. :return: True if all items in the dictionary were sequentially numbered. If not, the chain stops at the first gap.
Like daisy_chain_list but takes a dict and checks it for sequentiality in the process of daisy chaining.
[ "Like", "daisy_chain_list", "but", "takes", "a", "dict", "and", "checks", "it", "for", "sequentiality", "in", "the", "process", "of", "daisy", "chaining", "." ]
def daisy_chain_dict(self, items: Dict[int, T], get_in: Callable[[T], Pin], get_out: Callable[[T], Pin], start: int, ladder: bool) -> bool: """Like daisy_chain_list but takes a dict and checks it for sequentiality in the process of daisy chaining. Used for directly daisy chaining elements with specified numbers. :param items: The dictionary from numbers to items, of any type. :param get_in: Function to apply to an item to get the input pin. :param get_out: Function to apply to an item to get the output pin. :param start: Number to start accessing the dictionary from. :param ladder: If true, alternate chain connections are flipped to create a vertical ladder in wireviz. :return: True if all items in the dictionary were sequentially numbered. If not, the chain stops at the first gap. """ if len(items) < 2: return True if start not in items: return False even = False for index in range(start + 1, start + len(items)): if index not in items: return False if even or not ladder: self.connect(get_out(items[index - 1]), get_in(items[index])) else: self.connect(get_in(items[index]), get_out(items[index - 1])) even = not even return True
[ "def", "daisy_chain_dict", "(", "self", ",", "items", ":", "Dict", "[", "int", ",", "T", "]", ",", "get_in", ":", "Callable", "[", "[", "T", "]", ",", "Pin", "]", ",", "get_out", ":", "Callable", "[", "[", "T", "]", ",", "Pin", "]", ",", "start", ":", "int", ",", "ladder", ":", "bool", ")", "->", "bool", ":", "if", "len", "(", "items", ")", "<", "2", ":", "return", "True", "if", "start", "not", "in", "items", ":", "return", "False", "even", "=", "False", "for", "index", "in", "range", "(", "start", "+", "1", ",", "start", "+", "len", "(", "items", ")", ")", ":", "if", "index", "not", "in", "items", ":", "return", "False", "if", "even", "or", "not", "ladder", ":", "self", ".", "connect", "(", "get_out", "(", "items", "[", "index", "-", "1", "]", ")", ",", "get_in", "(", "items", "[", "index", "]", ")", ")", "else", ":", "self", ".", "connect", "(", "get_in", "(", "items", "[", "index", "]", ")", ",", "get_out", "(", "items", "[", "index", "-", "1", "]", ")", ")", "even", "=", "not", "even", "return", "True" ]
https://github.com/missionpinball/mpf/blob/8e6b74cff4ba06d2fec9445742559c1068b88582/mpf/wire/base.py#L244-L270
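A minimal mock showing the sequentiality rule; FakeSystem and the string "pins" are illustrative stand-ins for the real wireviz types, and the method is invoked unbound on the System class from the record above.

connections = []

class FakeSystem:
    def connect(self, a, b):
        connections.append((a, b))

items = {1: 'a', 2: 'b', 4: 'd'}  # note the gap at key 3
ok = System.daisy_chain_dict(FakeSystem(), items,
                             get_in=lambda x: x + '_in',
                             get_out=lambda x: x + '_out',
                             start=1, ladder=False)
# ok is False: only ('a_out', 'b_in') was connected before the gap stopped the chain.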
pytorch/audio
7b6b2d000023e2aa3365b769866c5f375e0d5fda
torchaudio/functional/functional.py
python
linear_fbanks
( n_freqs: int, f_min: float, f_max: float, n_filter: int, sample_rate: int, )
return fb
r"""Creates a linear triangular filterbank. Note: For the sake of the numerical compatibility with librosa, not all the coefficients in the resulting filter bank has magnitude of 1. .. image:: https://download.pytorch.org/torchaudio/doc-assets/lin_fbanks.png :alt: Visualization of generated filter bank Args: n_freqs (int): Number of frequencies to highlight/apply f_min (float): Minimum frequency (Hz) f_max (float): Maximum frequency (Hz) n_filter (int): Number of (linear) triangular filter sample_rate (int): Sample rate of the audio waveform Returns: Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_filter``) meaning number of frequencies to highlight/apply to x the number of filterbanks. Each column is a filterbank so that assuming there is a matrix A of size (..., ``n_freqs``), the applied result would be ``A * linear_fbanks(A.size(-1), ...)``.
r"""Creates a linear triangular filterbank.
[ "r", "Creates", "a", "linear", "triangular", "filterbank", "." ]
def linear_fbanks( n_freqs: int, f_min: float, f_max: float, n_filter: int, sample_rate: int, ) -> Tensor: r"""Creates a linear triangular filterbank. Note: For the sake of the numerical compatibility with librosa, not all the coefficients in the resulting filter bank have a magnitude of 1. .. image:: https://download.pytorch.org/torchaudio/doc-assets/lin_fbanks.png :alt: Visualization of generated filter bank Args: n_freqs (int): Number of frequencies to highlight/apply f_min (float): Minimum frequency (Hz) f_max (float): Maximum frequency (Hz) n_filter (int): Number of (linear) triangular filters sample_rate (int): Sample rate of the audio waveform Returns: Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_filter``) meaning number of frequencies to highlight/apply to x the number of filterbanks. Each column is a filterbank so that assuming there is a matrix A of size (..., ``n_freqs``), the applied result would be ``A * linear_fbanks(A.size(-1), ...)``. """ # freq bins all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) # filter mid-points f_pts = torch.linspace(f_min, f_max, n_filter + 2) # create filterbank fb = _create_triangular_filterbank(all_freqs, f_pts) return fb
[ "def", "linear_fbanks", "(", "n_freqs", ":", "int", ",", "f_min", ":", "float", ",", "f_max", ":", "float", ",", "n_filter", ":", "int", ",", "sample_rate", ":", "int", ",", ")", "->", "Tensor", ":", "# freq bins", "all_freqs", "=", "torch", ".", "linspace", "(", "0", ",", "sample_rate", "//", "2", ",", "n_freqs", ")", "# filter mid-points", "f_pts", "=", "torch", ".", "linspace", "(", "f_min", ",", "f_max", ",", "n_filter", "+", "2", ")", "# create filterbank", "fb", "=", "_create_triangular_filterbank", "(", "all_freqs", ",", "f_pts", ")", "return", "fb" ]
https://github.com/pytorch/audio/blob/7b6b2d000023e2aa3365b769866c5f375e0d5fda/torchaudio/functional/functional.py#L518-L557
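A usage sketch matching the docstring's contract: build a 40-filter bank for a 1025-bin spectrogram at 16 kHz and apply it with a matrix product (the tensor shapes are illustrative).

import torch
import torchaudio.functional as F

fb = F.linear_fbanks(n_freqs=1025, f_min=0.0, f_max=8000.0, n_filter=40, sample_rate=16000)
spec = torch.rand(1, 400, 1025)   # (..., time, n_freqs), dummy data
filtered = spec @ fb              # shape (1, 400, 40)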
jmoiron/humanize
f702c1c5fba49418a2d76cee9db0249966f090e6
src/humanize/i18n.py
python
_gettext_noop
(message)
return message
Mark a string as a translation string without translating it. Example usage: ```python CONSTANTS = [_gettext_noop('first'), _gettext_noop('second')] def num_name(n): return _gettext(CONSTANTS[n]) ``` Args: message (str): Text to translate in the future. Returns: str: Original text, unchanged.
Mark a string as a translation string without translating it.
[ "Mark", "a", "string", "as", "a", "translation", "string", "without", "translating", "it", "." ]
def _gettext_noop(message): """Mark a string as a translation string without translating it. Example usage: ```python CONSTANTS = [_gettext_noop('first'), _gettext_noop('second')] def num_name(n): return _gettext(CONSTANTS[n]) ``` Args: message (str): Text to translate in the future. Returns: str: Original text, unchanged. """ return message
[ "def", "_gettext_noop", "(", "message", ")", ":", "return", "message" ]
https://github.com/jmoiron/humanize/blob/f702c1c5fba49418a2d76cee9db0249966f090e6/src/humanize/i18n.py#L189-L205
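The docstring's own example already shows the pattern; as a complement, here is a self-contained, stdlib-only sketch of why the noop marker exists: string extractors such as xgettext see the literals at import time, while actual translation is deferred until the call, when the active locale is known.

```python
# Minimal stdlib-only sketch of the deferred-translation pattern.
import gettext

_ = gettext.gettext          # real translation happens at call time

def _noop(message):          # marker only; mirrors _gettext_noop above
    return message

# Stored untranslated, but still visible to string extractors like xgettext.
CONSTANTS = [_noop("first"), _noop("second")]

def num_name(n):
    return _(CONSTANTS[n])   # translated at lookup time, under the active locale
```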
futurecore/python-csp
1f96b76de1531ecf6bf1759641eadb08266ff7e7
examples/sensors/oscilloscope.py
python
__test_random
()
return
Test the Oscilloscope with random data.
Test the Oscilloscope with random data.
[ "Test", "the", "Oscilloscope", "with", "random", "data", "." ]
def __test_random(): """Test the Oscilloscope with random data. """ channel = Channel() par = Par(__Random(channel), Oscilloscope(channel)) par.start() return
[ "def", "__test_random", "(", ")", ":", "channel", "=", "Channel", "(", ")", "par", "=", "Par", "(", "__Random", "(", "channel", ")", ",", "Oscilloscope", "(", "channel", ")", ")", "par", ".", "start", "(", ")", "return" ]
https://github.com/futurecore/python-csp/blob/1f96b76de1531ecf6bf1759641eadb08266ff7e7/examples/sensors/oscilloscope.py#L117-L123
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/sqlalchemy/sql/functions.py
python
Function.__init__
(self, name, *clauses, **kw)
Construct a :class:`.Function`. The :data:`.func` construct is normally used to construct new :class:`.Function` instances.
Construct a :class:`.Function`.
[ "Construct", "a", ":", "class", ":", ".", "Function", "." ]
def __init__(self, name, *clauses, **kw): """Construct a :class:`.Function`. The :data:`.func` construct is normally used to construct new :class:`.Function` instances. """ self.packagenames = kw.pop('packagenames', None) or [] self.name = name self._bind = kw.get('bind', None) self.type = sqltypes.to_instance(kw.get('type_', None)) FunctionElement.__init__(self, *clauses, **kw)
[ "def", "__init__", "(", "self", ",", "name", ",", "*", "clauses", ",", "*", "*", "kw", ")", ":", "self", ".", "packagenames", "=", "kw", ".", "pop", "(", "'packagenames'", ",", "None", ")", "or", "[", "]", "self", ".", "name", "=", "name", "self", ".", "_bind", "=", "kw", ".", "get", "(", "'bind'", ",", "None", ")", "self", ".", "type", "=", "sqltypes", ".", "to_instance", "(", "kw", ".", "get", "(", "'type_'", ",", "None", ")", ")", "FunctionElement", ".", "__init__", "(", "self", ",", "*", "clauses", ",", "*", "*", "kw", ")" ]
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/sqlalchemy/sql/functions.py#L422-L434
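As the docstring says, `Function` is normally reached through the `func` generator rather than constructed directly. A small sketch of that documented path against SQLAlchemy's public API (the copy in this repo is an older vendored release, but the behaviour below is long-standing); the function name `my_udf` is hypothetical.

```python
# Unknown attribute names on `func` yield plain Function instances.
from sqlalchemy import func
from sqlalchemy.sql.functions import Function

expr = func.my_udf(1, 2)            # hypothetical SQL function name
print(isinstance(expr, Function))   # True
print(expr)                         # my_udf(:my_udf_1, :my_udf_2)
```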
PINTO0309/PINTO_model_zoo
2924acda7a7d541d8712efd7cc4fd1c61ef5bddd
064_Dense_Depth/nyu/01_float32/13_integer_quantization.py
python
representative_dataset_gen_480x640
()
[]
def representative_dataset_gen_480x640(): for data in raw_test_data.take(10): image = data['image'].numpy() image = tf.image.resize(image, (480, 640)) image = image[np.newaxis,:,:,:] image = image - 127.5 image = image * 0.007843 yield [image]
[ "def", "representative_dataset_gen_480x640", "(", ")", ":", "for", "data", "in", "raw_test_data", ".", "take", "(", "10", ")", ":", "image", "=", "data", "[", "'image'", "]", ".", "numpy", "(", ")", "image", "=", "tf", ".", "image", ".", "resize", "(", "image", ",", "(", "480", ",", "640", ")", ")", "image", "=", "image", "[", "np", ".", "newaxis", ",", ":", ",", ":", ",", ":", "]", "image", "=", "image", "-", "127.5", "image", "=", "image", "*", "0.007843", "yield", "[", "image", "]" ]
https://github.com/PINTO0309/PINTO_model_zoo/blob/2924acda7a7d541d8712efd7cc4fd1c61ef5bddd/064_Dense_Depth/nyu/01_float32/13_integer_quantization.py#L7-L14
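For context, a sketch of how a generator like this is typically handed to the TFLite converter for post-training integer quantization, matching the script's filename; the saved-model path and output filename are placeholders, not taken from the repo.

```python
# Wiring the generator into post-training integer quantization.
# "saved_model_480x640" and the output name are hypothetical placeholders.
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model("saved_model_480x640")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen_480x640
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]

with open("dense_depth_480x640_integer_quant.tflite", "wb") as f:
    f.write(converter.convert())
```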
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/Cymptom/Integrations/Cymptom/Cymptom.py
python
api_test
(client: Client)
Returning 'ok' indicates that the integration works like it is supposed to and the connection to the service is successful. :param client: Cymptom client
Returning 'ok' indicates that the integration works like it is supposed to and the connection to the service is successful. :param client: Cymptom client
[ "Returning", "ok", "indicates", "that", "the", "integration", "works", "like", "it", "is", "supposed", "to", "and", "the", "connection", "to", "the", "service", "is", "successful", ".", ":", "param", "client", ":", "Cymptom", "client" ]
def api_test(client: Client): """ Returning 'ok' indicates that the integration works like it is supposed to and the connection to the service is successful. :param client: Cymptom client """ try: results = client.api_test() if results and results.get("status") == "ok": return return_results('ok') else: return return_error(f"There was an error: {results.get('status', 'Failure')} - {results.get('error')}") except Exception as e: return_error(f"There was an error in testing connection to URL: {client._base_url}," f"Please make sure that the API key is valid and has the right permissions, " f"and that the URL is in the correct form. Error: {str(e)}")
[ "def", "api_test", "(", "client", ":", "Client", ")", ":", "try", ":", "results", "=", "client", ".", "api_test", "(", ")", "if", "results", "and", "results", ".", "get", "(", "\"status\"", ")", "==", "\"ok\"", ":", "return", "return_results", "(", "'ok'", ")", "else", ":", "return", "return_error", "(", "f\"There was an error: {results.get('status', 'Failure')} - {results.get('error')}\"", ")", "except", "Exception", "as", "e", ":", "return_error", "(", "f\"There was an error in testing connection to URL: {client._base_url},\"", "f\"Please make sure that the API key is valid and has the right permissions, \"", "f\"and that the URL is in the correct form. Error: {str(e)}\"", ")" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/Cymptom/Integrations/Cymptom/Cymptom.py#L74-L88
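For context, a sketch of the usual XSOAR dispatch around such a function: the platform issues the built-in `test-module` command when a user clicks Test. The `Client` keyword arguments follow the common `BaseClient` pattern and are assumptions, not taken from this integration.

```python
# Hypothetical main() dispatch; Client kwargs follow the common BaseClient
# pattern and are assumptions, not taken from the Cymptom integration.
def main():
    params = demisto.params()
    client = Client(base_url=params.get("url"),
                    verify=not params.get("insecure", False))
    if demisto.command() == "test-module":
        api_test(client)
```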
apple/ccs-calendarserver
13c706b985fb728b9aab42dc0fef85aae21921c3
contrib/calendarserver_conversion.py
python
resetPostgresConf
(serverRoot)
[]
def resetPostgresConf(serverRoot): clusterDir = os.path.join(serverRoot, "Data/Database.xpg/cluster.pg") confFile = os.path.join(clusterDir, "postgresql.conf") if os.path.exists(confFile): os.remove(confFile) subprocess.call([ "/usr/bin/touch", confFile ], stdout=sys.stdout, stderr=sys.stderr)
[ "def", "resetPostgresConf", "(", "serverRoot", ")", ":", "clusterDir", "=", "os", ".", "path", ".", "join", "(", "serverRoot", ",", "\"Data/Database.xpg/cluster.pg\"", ")", "confFile", "=", "os", ".", "path", ".", "join", "(", "clusterDir", ",", "\"postgresql.conf\"", ")", "if", "os", ".", "path", ".", "exists", "(", "confFile", ")", ":", "os", ".", "remove", "(", "confFile", ")", "subprocess", ".", "call", "(", "[", "\"/usr/bin/touch\"", ",", "confFile", "]", ",", "stdout", "=", "sys", ".", "stdout", ",", "stderr", "=", "sys", ".", "stderr", ")" ]
https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/contrib/calendarserver_conversion.py#L136-L143
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/optimize/_remove_redundancy.py
python
_remove_zero_rows
(A, b)
return A, b, status, message
Eliminates trivial equations from a system of equations defined by Ax = b and identifies trivial infeasibilities Parameters ---------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the removal operation 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization.
Eliminates trivial equations from a system of equations defined by Ax = b and identifies trivial infeasibilities
[ "Eliminates", "trivial", "equations", "from", "a", "system", "of", "equations", "defined", "by", "Ax", "=", "b", "and", "identifies", "trivial", "infeasibilities" ]
def _remove_zero_rows(A, b): """ Eliminates trivial equations from a system of equations defined by Ax = b and identifies trivial infeasibilities Parameters ---------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the removal operation 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. """ status = 0 message = "" i_zero = _row_count(A) == 0 A = A[np.logical_not(i_zero), :] if not(np.allclose(b[i_zero], 0)): status = 2 message = "There is a zero row in A_eq with a nonzero corresponding " \ "entry in b_eq. The problem is infeasible." b = b[np.logical_not(i_zero)] return A, b, status, message
[ "def", "_remove_zero_rows", "(", "A", ",", "b", ")", ":", "status", "=", "0", "message", "=", "\"\"", "i_zero", "=", "_row_count", "(", "A", ")", "==", "0", "A", "=", "A", "[", "np", ".", "logical_not", "(", "i_zero", ")", ",", ":", "]", "if", "not", "(", "np", ".", "allclose", "(", "b", "[", "i_zero", "]", ",", "0", ")", ")", ":", "status", "=", "2", "message", "=", "\"There is a zero row in A_eq with a nonzero corresponding \"", "\"entry in b_eq. The problem is infeasible.\"", "b", "=", "b", "[", "np", ".", "logical_not", "(", "i_zero", ")", "]", "return", "A", ",", "b", ",", "status", ",", "message" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/optimize/_remove_redundancy.py#L57-L92
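A worked example of the behaviour described above. It is self-contained: `np.count_nonzero(A, axis=1)` stands in for the private `_row_count` helper, which is an assumption about that helper's semantics.

```python
# Worked example of the zero-row logic above.
import numpy as np

A = np.array([[1.0, 2.0],
              [0.0, 0.0],    # trivial equation: 0*x1 + 0*x2 = b[1]
              [3.0, 4.0]])
b = np.array([5.0, 0.0, 6.0])

i_zero = np.count_nonzero(A, axis=1) == 0   # stands in for _row_count(A) == 0
assert np.allclose(b[i_zero], 0)            # feasible -> row is simply dropped
A_red, b_red = A[~i_zero, :], b[~i_zero]    # shapes (2, 2) and (2,)

# Had b[1] been nonzero, 0 == b[1] would be unsatisfiable, and the routine
# would instead return status 2 with the "Trivially infeasible" message.
```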
ninja-ide/ninja-ide
87d91131bd19fdc3dcfd91eb97ad1e41c49c60c0
ninja_ide/dependencies/pycodestyle.py
python
StyleGuide.get_checks
(self, argument_name)
return sorted(checks)
Get all the checks for this category. Find all globally visible functions where the first argument name starts with argument_name and which contain selected tests.
Get all the checks for this category.
[ "Get", "all", "the", "checks", "for", "this", "category", "." ]
def get_checks(self, argument_name): """Get all the checks for this category. Find all globally visible functions where the first argument name starts with argument_name and which contain selected tests. """ checks = [] for check, attrs in _checks[argument_name].items(): (codes, args) = attrs if any(not (code and self.ignore_code(code)) for code in codes): checks.append((check.__name__, check, args)) return sorted(checks)
[ "def", "get_checks", "(", "self", ",", "argument_name", ")", ":", "checks", "=", "[", "]", "for", "check", ",", "attrs", "in", "_checks", "[", "argument_name", "]", ".", "items", "(", ")", ":", "(", "codes", ",", "args", ")", "=", "attrs", "if", "any", "(", "not", "(", "code", "and", "self", ".", "ignore_code", "(", "code", ")", ")", "for", "code", "in", "codes", ")", ":", "checks", ".", "append", "(", "(", "check", ".", "__name__", ",", "check", ",", "args", ")", ")", "return", "sorted", "(", "checks", ")" ]
https://github.com/ninja-ide/ninja-ide/blob/87d91131bd19fdc3dcfd91eb97ad1e41c49c60c0/ninja_ide/dependencies/pycodestyle.py#L2072-L2083
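A sketch of the other side of this registry: `pycodestyle.register_check` files a function under its first argument's name, which is exactly how `get_checks('logical_line')` finds it later. The `W999` check below is hypothetical.

```python
# Hypothetical custom check; pycodestyle derives its code (W999) from the
# docstring and registers it under the first argument name, "logical_line".
import pycodestyle

@pycodestyle.register_check
def tabs_anywhere(logical_line):
    """Reject tab characters anywhere in a logical line. W999"""
    offset = logical_line.find("\t")
    if offset >= 0:
        yield offset, "W999 tab character found"

style = pycodestyle.StyleGuide()
names = [name for name, _, _ in style.get_checks("logical_line")]
print("tabs_anywhere" in names)   # True; W999 is not in the default ignore list
```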
vivisect/vivisect
37b0b655d8dedfcf322e86b0f144b096e48d547e
envi/cli.py
python
EnviCli.do_saveout
(self, line)
Saves output to a file for any command. Still outputs to whatever canvas the command normally outputs to. saveout <output file> <cli command> Example: saveout out.txt search -c MZ
Saves output to a file for any command. Still outputs to whatever canvas the command normally outputs to.
[ "Saves", "output", "to", "a", "file", "for", "any", "command", ".", "Still", "outputs", "to", "whatever", "canvas", "the", "command", "normally", "outputs", "to", "." ]
def do_saveout(self, line): ''' Saves output to a file for any command. Still outputs to whatever canvas the command normally outputs to. saveout <output file> <cli command> Example: saveout out.txt search -c MZ ''' argv = shlex.split(line) if len(argv) < 2: return self.do_help('saveout') fname = argv[0] command = ' '.join(argv[1:]) strcanvas = e_canvas.StringMemoryCanvas(self.canvas.mem) with e_canvas.TeeCanvas(self, (self.canvas, strcanvas)) as tc: self.onecmd(command) with open(fname, 'wb') as f: f.write(str(strcanvas).encode('utf-8'))
[ "def", "do_saveout", "(", "self", ",", "line", ")", ":", "argv", "=", "shlex", ".", "split", "(", "line", ")", "if", "len", "(", "argv", ")", "<", "2", ":", "return", "self", ".", "do_help", "(", "'saveout'", ")", "fname", "=", "argv", "[", "0", "]", "command", "=", "' '", ".", "join", "(", "argv", "[", "1", ":", "]", ")", "strcanvas", "=", "e_canvas", ".", "StringMemoryCanvas", "(", "self", ".", "canvas", ".", "mem", ")", "with", "e_canvas", ".", "TeeCanvas", "(", "self", ",", "(", "self", ".", "canvas", ",", "strcanvas", ")", ")", "as", "tc", ":", "self", ".", "onecmd", "(", "command", ")", "with", "open", "(", "fname", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "strcanvas", ")", ".", "encode", "(", "'utf-8'", ")", ")" ]
https://github.com/vivisect/vivisect/blob/37b0b655d8dedfcf322e86b0f144b096e48d547e/envi/cli.py#L606-L628
mrkipling/maraschino
c6be9286937783ae01df2d6d8cebfc8b2734a7d7
lib/werkzeug/contrib/atom.py
python
format_iso8601
(obj)
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
Format a datetime object for iso8601
Format a datetime object for iso8601
[ "Format", "a", "datetime", "object", "for", "iso8601" ]
def format_iso8601(obj): """Format a datetime object for iso8601""" return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
[ "def", "format_iso8601", "(", "obj", ")", ":", "return", "obj", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")" ]
https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/werkzeug/contrib/atom.py#L43-L45
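A doctest-style illustration, assuming the function is in scope (it lives in `werkzeug.contrib.atom`). One caveat visible in the source: the 'Z' suffix is appended verbatim, so the caller must supply UTC datetimes.

```python
from datetime import datetime

print(format_iso8601(datetime(2012, 3, 4, 5, 6, 7)))
# -> 2012-03-04T05:06:07Z  (a naive local time would be mislabelled as UTC)
```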
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/utils/dummy_sentencepiece_objects.py
python
XLNetTokenizer.__init__
(self, *args, **kwargs)
[]
def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"])
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "requires_backends", "(", "self", ",", "[", "\"sentencepiece\"", "]", ")" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/utils/dummy_sentencepiece_objects.py#L195-L196
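This dummy class exists so that `import transformers` succeeds even without the optional sentencepiece backend; the failure is deferred to instantiation. A sketch of that failure mode, assuming sentencepiece is absent (with it installed, the real tokenizer is imported instead); the vocab path is hypothetical.

```python
# Import always succeeds; instantiating the dummy raises a helpful error.
from transformers import XLNetTokenizer

try:
    XLNetTokenizer("spiece.model")   # hypothetical vocab file path
except ImportError as err:
    print(err)                       # asks you to install sentencepiece
```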
allenai/allennlp
a3d71254fcc0f3615910e9c3d48874515edf53e0
allennlp/modules/transformer/t5.py
python
T5.resize_token_embeddings
( self, new_size: int, *, init_fn: Callable = torch.nn.init.normal_ )
Resizes the token embeddings in the model. This takes care of the token embeddings for the encoder, the decoder, and the LM head. new_size : `int` The new size of the token embeddings init_fn : `Callable` The function to use to initialize new embeddings. This function will be called with a single argument, the tensor to initialize, and it is expected to initialize the tensor in place. Many of the functions from `torch.nn.init` fit.
Resizes the token embeddings in the model.
[ "Resizes", "the", "token", "embeddings", "in", "the", "model", "." ]
def resize_token_embeddings( self, new_size: int, *, init_fn: Callable = torch.nn.init.normal_ ) -> None: """ Resizes the token embeddings in the model. This takes care of the token embeddings for the encoder, the decoder, and the LM head. new_size : `int` The new size of the token embeddings init_fn : `Callable` The function to use to initialize new embeddings. This function will be called with a single argument, the tensor to initialize, and it is expected to initialize the tensor in place. Many of the functions from `torch.nn.init` fit. """ self.encoder.resize_token_embeddings(new_size, init_fn=init_fn) # If encoder and decoder share embeddings, this is a no-op the second time. self.decoder.resize_token_embeddings(new_size, init_fn=init_fn) # resize lm head old_size = self.lm_head.out_features if old_size == new_size: return new_lm_head = torch.nn.Linear( self.lm_head.in_features, new_size, self.lm_head.bias, self.lm_head.weight.device, self.lm_head.weight.dtype, ) copy_size = min(old_size, new_size) new_lm_head.weight.data[:copy_size, ...] = self.lm_head.weight.data[:copy_size, ...] if self.lm_head.bias and new_lm_head.bias: new_lm_head.bias.data[:copy_size, ...] = self.lm_head.bias[:copy_size, ...] if new_size > old_size: init_fn(new_lm_head.weight.data[copy_size:, ...]) if new_lm_head.bias: init_fn(new_lm_head.bias[copy_size:, ...]) self.lm_head = new_lm_head
[ "def", "resize_token_embeddings", "(", "self", ",", "new_size", ":", "int", ",", "*", ",", "init_fn", ":", "Callable", "=", "torch", ".", "nn", ".", "init", ".", "normal_", ")", "->", "None", ":", "self", ".", "encoder", ".", "resize_token_embeddings", "(", "new_size", ",", "init_fn", "=", "init_fn", ")", "# If encoder and decoder share embeddings, this is a no-op the second time.", "self", ".", "decoder", ".", "resize_token_embeddings", "(", "new_size", ",", "init_fn", "=", "init_fn", ")", "# resize lm head", "old_size", "=", "self", ".", "lm_head", ".", "out_features", "if", "old_size", "==", "new_size", ":", "return", "new_lm_head", "=", "torch", ".", "nn", ".", "Linear", "(", "self", ".", "lm_head", ".", "in_features", ",", "new_size", ",", "self", ".", "lm_head", ".", "bias", ",", "self", ".", "lm_head", ".", "weight", ".", "device", ",", "self", ".", "lm_head", ".", "weight", ".", "dtype", ",", ")", "copy_size", "=", "min", "(", "old_size", ",", "new_size", ")", "new_lm_head", ".", "weight", ".", "data", "[", ":", "copy_size", ",", "...", "]", "=", "self", ".", "lm_head", ".", "weight", ".", "data", "[", ":", "copy_size", ",", "...", "]", "if", "self", ".", "lm_head", ".", "bias", "and", "new_lm_head", ".", "bias", ":", "new_lm_head", ".", "bias", ".", "data", "[", ":", "copy_size", ",", "...", "]", "=", "self", ".", "lm_head", ".", "bias", "[", ":", "copy_size", ",", "...", "]", "if", "new_size", ">", "old_size", ":", "init_fn", "(", "new_lm_head", ".", "weight", ".", "data", "[", "copy_size", ":", ",", "...", "]", ")", "if", "new_lm_head", ".", "bias", ":", "init_fn", "(", "new_lm_head", ".", "bias", "[", "copy_size", ":", ",", "...", "]", ")", "self", ".", "lm_head", "=", "new_lm_head" ]
https://github.com/allenai/allennlp/blob/a3d71254fcc0f3615910e9c3d48874515edf53e0/allennlp/modules/transformer/t5.py#L838-L877
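A hedged usage fragment: the resize call follows the signature above, while the loader line assumes AllenNLP's `from_pretrained_module` convenience constructor applies to this class; treat both the loader and the vocabulary numbers as illustrative, not as the library's documented recipe.

```python
import torch
from allennlp.modules.transformer.t5 import T5

model = T5.from_pretrained_module("t5-small")    # assumed loader; see lead-in
model.resize_token_embeddings(
    32128 + 8,                                   # e.g. eight extra task tokens
    init_fn=lambda t: torch.nn.init.normal_(t, std=0.02),  # in-place, per docstring
)
# Shrinking truncates the extra rows; a same-size call leaves the LM head
# untouched thanks to the early return above.
```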
bendmorris/static-python
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
Lib/xml/dom/minidom.py
python
parse
(file, parser=None, bufsize=None)
Parse a file into a DOM by filename or file object.
Parse a file into a DOM by filename or file object.
[ "Parse", "a", "file", "into", "a", "DOM", "by", "filename", "or", "file", "object", "." ]
def parse(file, parser=None, bufsize=None): """Parse a file into a DOM by filename or file object.""" if parser is None and not bufsize: from xml.dom import expatbuilder return expatbuilder.parse(file) else: from xml.dom import pulldom return _do_pulldom_parse(pulldom.parse, (file,), {'parser': parser, 'bufsize': bufsize})
[ "def", "parse", "(", "file", ",", "parser", "=", "None", ",", "bufsize", "=", "None", ")", ":", "if", "parser", "is", "None", "and", "not", "bufsize", ":", "from", "xml", ".", "dom", "import", "expatbuilder", "return", "expatbuilder", ".", "parse", "(", "file", ")", "else", ":", "from", "xml", ".", "dom", "import", "pulldom", "return", "_do_pulldom_parse", "(", "pulldom", ".", "parse", ",", "(", "file", ",", ")", ",", "{", "'parser'", ":", "parser", ",", "'bufsize'", ":", "bufsize", "}", ")" ]
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/xml/dom/minidom.py#L1956-L1964
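A self-contained sketch of the file-object mode (a filename string behaves the same; supplying `parser` or `bufsize` routes through pulldom instead of expatbuilder, as the branch above shows).

```python
import io
from xml.dom import minidom

doc = minidom.parse(io.StringIO("<root><item id='1'/></root>"))
print(doc.documentElement.tagName)                             # root
print(doc.getElementsByTagName("item")[0].getAttribute("id"))  # 1
```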
mchristopher/PokemonGo-DesktopMap
ec37575f2776ee7d64456e2a1f6b6b78830b4fe0
app/pywin/Lib/logging/handlers.py
python
SMTPHandler.emit
(self, record)
Emit a record. Format the record and send it to the specified addressees.
Emit a record.
[ "Emit", "a", "record", "." ]
def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.quit() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "try", ":", "import", "smtplib", "from", "email", ".", "utils", "import", "formatdate", "port", "=", "self", ".", "mailport", "if", "not", "port", ":", "port", "=", "smtplib", ".", "SMTP_PORT", "smtp", "=", "smtplib", ".", "SMTP", "(", "self", ".", "mailhost", ",", "port", ",", "timeout", "=", "self", ".", "_timeout", ")", "msg", "=", "self", ".", "format", "(", "record", ")", "msg", "=", "\"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\nDate: %s\\r\\n\\r\\n%s\"", "%", "(", "self", ".", "fromaddr", ",", "\",\"", ".", "join", "(", "self", ".", "toaddrs", ")", ",", "self", ".", "getSubject", "(", "record", ")", ",", "formatdate", "(", ")", ",", "msg", ")", "if", "self", ".", "username", ":", "if", "self", ".", "secure", "is", "not", "None", ":", "smtp", ".", "ehlo", "(", ")", "smtp", ".", "starttls", "(", "*", "self", ".", "secure", ")", "smtp", ".", "ehlo", "(", ")", "smtp", ".", "login", "(", "self", ".", "username", ",", "self", ".", "password", ")", "smtp", ".", "sendmail", "(", "self", ".", "fromaddr", ",", "self", ".", "toaddrs", ",", "msg", ")", "smtp", ".", "quit", "(", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "raise", "except", ":", "self", ".", "handleError", "(", "record", ")" ]
https://github.com/mchristopher/PokemonGo-DesktopMap/blob/ec37575f2776ee7d64456e2a1f6b6b78830b4fe0/app/pywin/Lib/logging/handlers.py#L918-L948
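A configuration sketch for the handler this method belongs to; host, credentials, and addresses are placeholders. Passing `secure=()` exercises the plain `starttls()` branch visible in `emit()` above.

```python
import logging
import logging.handlers

handler = logging.handlers.SMTPHandler(
    mailhost=("smtp.example.com", 587),          # placeholder host/port
    fromaddr="app@example.com",
    toaddrs=["ops@example.com"],
    subject="Application error",
    credentials=("user", "password"),            # placeholder credentials
    secure=(),                                   # empty tuple -> starttls()
)
handler.setLevel(logging.ERROR)
logging.getLogger().addHandler(handler)
logging.error("disk almost full")                # one email per record
```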
taowen/es-monitor
c4deceb4964857f495d13bfaf2d92f36734c9e1c
es_sql/executors/select_inside_executor.py
python
SelectInsideBranchExecutor.is_filter_only
(self)
return self._is_filter_only and all(child_executor.is_filter_only() for child_executor in self.children)
[]
def is_filter_only(self): return self._is_filter_only and all(child_executor.is_filter_only() for child_executor in self.children)
[ "def", "is_filter_only", "(", "self", ")", ":", "return", "self", ".", "_is_filter_only", "and", "all", "(", "child_executor", ".", "is_filter_only", "(", ")", "for", "child_executor", "in", "self", ".", "children", ")" ]
https://github.com/taowen/es-monitor/blob/c4deceb4964857f495d13bfaf2d92f36734c9e1c/es_sql/executors/select_inside_executor.py#L164-L165