Columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k)
378,400
def visit_BitVecExtract(self, expression, *operands):
    op = expression.operands[0]
    begining = expression.begining
    end = expression.end
    size = end - begining + 1
    if begining == 0 and end + 1 == op.size:
        return op
    elif isinstance(op, BitVecExtract):
        return BitVecExtract(op.value, op.begining + begining, size,
                             taint=expression.taint)
    elif isinstance(op, BitVecConcat):
        new_operands = []
        bitcount = 0
        for item in reversed(op.operands):
            if begining >= item.size:
                begining -= item.size
            else:
                if bitcount < expression.size:
                    new_operands.append(item)
                bitcount += item.size
        if begining != expression.begining:
            return BitVecExtract(
                BitVecConcat(sum([x.size for x in new_operands]),
                             *reversed(new_operands)),
                begining, expression.size, taint=expression.taint)
    if isinstance(op, (BitVecAnd, BitVecOr, BitVecXor)):
        bitoperand_a, bitoperand_b = op.operands
        return op.__class__(
            BitVecExtract(bitoperand_a, begining, expression.size),
            BitVecExtract(bitoperand_b, begining, expression.size),
            taint=expression.taint)
extract(sizeof(a), 0)(a) ==> a
extract(16, 0)(concat(a, b, c, d)) ==> concat(c, d)
extract(m, M)(and/or/xor a b) ==> and/or/xor((extract(m, M) a), (extract(m, M) b))
378,401
def get_billable_items(self):
    items = []
    for obj in self.context.getBillableItems():
        if self.is_profile(obj):
            items.append({
                "obj": obj,
                "title": obj.Title(),
                "vat": obj.getAnalysisProfileVAT(),
                "price": self.format_price(obj.getAnalysisProfilePrice()),
            })
        if self.is_analysis(obj):
            items.append({
                "obj": obj,
                "title": obj.Title(),
                "vat": obj.getVAT(),
                "price": self.format_price(obj.getPrice()),
            })
    return items
Return a list of billable items
378,402
def filter(self, index):
    return Datamat(categories=self._categories, datamat=self, index=index)
Filters a datamat by different aspects.

This function is a device to filter the datamat by certain logical conditions. It takes as input a logical array (contains only True or False for every datapoint) and kicks out all datapoints for which the array says False. The logical array can conveniently be created with numpy::

    >>> print np.unique(fm.category)
    np.array([2,9])
    >>> fm_filtered = fm[ fm.category == 9 ]
    >>> print np.unique(fm_filtered.category)
    np.array([9])

Parameters:
    index : array
        Array-like that contains True for every element that passes the filter; else contains False
Returns:
    datamat : Datamat Instance
378,403
def get_order_parameters(self, structure, n, indices_neighs=None, \ tol=0.0, target_spec=None): if n < 0: raise ValueError("Site index smaller zero!") if n >= len(structure): raise ValueError("Site index beyond maximum!") if indices_neighs is not None: for index in indices_neighs: if index >= len(structure): raise ValueError("Neighbor site index beyond maximum!") if tol < 0.0: raise ValueError("Negative tolerance for weighted solid angle!") left_of_unity = 1.0 - 1.0e-12 very_small = 1.0e-12 fac_bcc = 1.0 / exp(-0.5) centsite = structure[n] if indices_neighs is not None: neighsites = [structure[index] for index in indices_neighs] elif self._voroneigh: vnn = VoronoiNN(tol=tol, targets=target_spec) neighsites = vnn.get_nn(structure, n) else: neighsitestmp = [i[0] for i in structure.get_sites_in_sphere( centsite.coords, self._cutoff)] neighsites = [] if centsite not in neighsitestmp: raise ValueError("Could not find center site!") else: neighsitestmp.remove(centsite) if target_spec is None: neighsites = list(neighsitestmp) else: neighsites[:] = [site for site in neighsitestmp \ if site.specie.symbol == target_spec] nneigh = len(neighsites) self._last_nneigh = nneigh rij = [] rjk = [] rijnorm = [] rjknorm = [] dist = [] distjk_unique = [] distjk = [] centvec = centsite.coords if self._computerijs: for j, neigh in enumerate(neighsites): rij.append((neigh.coords - centvec)) dist.append(np.linalg.norm(rij[j])) rijnorm.append((rij[j] / dist[j])) if self._computerjks: for j, neigh in enumerate(neighsites): rjk.append([]) rjknorm.append([]) distjk.append([]) kk = 0 for k in range(len(neighsites)): if j != k: rjk[j].append(neighsites[k].coords - neigh.coords) distjk[j].append(np.linalg.norm(rjk[j][kk])) if k > j: distjk_unique.append(distjk[j][kk]) rjknorm[j].append(rjk[j][kk] / distjk[j][kk]) kk = kk + 1 ops = [0.0 for t in self._types] for i, t in enumerate(self._types): if t == "cn": ops[i] = nneigh / self._params[i][] elif t == "sgl_bd": dist_sorted = sorted(dist) if len(dist_sorted) == 1: ops[i] = 1.0 elif len(dist_sorted) > 1: ops[i] = 1.0 - dist_sorted[0] / dist_sorted[1] if self._boops: thetas = [] phis = [] for j, vec in enumerate(rijnorm): thetas.append(acos(max(-1.0, min(vec[2], 1.0)))) tmpphi = 0.0 if -left_of_unity < vec[2] < left_of_unity: tmpphi = acos(max(-1.0, min(vec[0] / (sqrt( vec[0] * vec[0] + vec[1] * vec[1])), 1.0))) if vec[1] < 0.0: tmpphi = -tmpphi phis.append(tmpphi) for i, t in enumerate(self._types): if t == "q2": ops[i] = self.get_q2(thetas, phis) if len( thetas) > 0 else None elif t == "q4": ops[i] = self.get_q4(thetas, phis) if len( thetas) > 0 else None elif t == "q6": ops[i] = self.get_q6(thetas, phis) if len( thetas) > 0 else None if self._geomops: gaussthetak = [0.0 for t in self._types] qsptheta = [[[] for j in range(nneigh)] for t in self._types] norms = [[[] for j in range(nneigh)] for t in self._types] ipi = 1.0 / pi piover2 = pi / 2.0 tetangoverpi = acos(-1.0 / 3.0) * ipi itetangminuspihalfoverpi = 1.0 / (tetangoverpi - 0.5) onethird = 1.0 / 3.0 twothird = 2.0 / 3.0 for j in range(nneigh): zaxis = rijnorm[j] kc = 0 for k in range(nneigh): if j != k: for i in range(len(self._types)): qsptheta[i][j].append(0.0) norms[i][j].append(0) tmp = max( -1.0, min(np.inner(zaxis, rijnorm[k]), 1.0)) thetak = acos(tmp) xaxis = gramschmidt(rijnorm[k], zaxis) if np.linalg.norm(xaxis) < very_small: flag_xaxis = True else: xaxis = xaxis / np.linalg.norm(xaxis) flag_xaxis = False if self._comp_azi: flag_yaxis = True yaxis = np.cross(zaxis, xaxis) if np.linalg.norm(yaxis) > 
very_small: yaxis = yaxis / np.linalg.norm(yaxis) flag_yaxis = False for i, t in enumerate(self._types): if t in ["bent", "sq_pyr_legacy"]: tmp = self._params[i][] * ( thetak * ipi - self._params[i][]) qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp) norms[i][j][kc] += 1 elif t in ["tri_plan", "tri_plan_max", "tet", "tet_max"]: tmp = self._params[i][] * ( thetak * ipi - self._params[i][]) gaussthetak[i] = exp(-0.5 * tmp * tmp) if t in ["tri_plan_max", "tet_max"]: qsptheta[i][j][kc] += gaussthetak[i] norms[i][j][kc] += 1 elif t in ["T", "tri_pyr", "sq_pyr", "pent_pyr", "hex_pyr"]: tmp = self._params[i][] * ( thetak * ipi - 0.5) qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp) norms[i][j][kc] += 1 elif t in ["sq_plan", "oct", "oct_legacy", "cuboct", "cuboct_max"]: if thetak >= self._params[i][]: tmp = self._params[i][] * ( thetak * ipi - 1.0) qsptheta[i][j][kc] += ( self._params[i][] * exp(-0.5 * tmp * tmp)) norms[i][j][kc] += self._params[i][] elif t in ["see_saw_rect", "tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr", "oct_max", "sq_plan_max", "hex_plan_max"]: if thetak < self._params[i][]: tmp = self._params[i][] * ( thetak * ipi - 0.5) if t != "hex_plan_max" else \ self._params[i][] * ( fabs(thetak * ipi - 0.5) - self._params[i][]) qsptheta[i][j][kc] += exp( -0.5 * tmp * tmp) norms[i][j][kc] += 1 elif t in ["pent_plan", "pent_plan_max"]: tmp = 0.4 if thetak <= self._params[i][ ] * pi \ else 0.8 tmp2 = self._params[i][] * ( thetak * ipi - tmp) gaussthetak[i] = exp(-0.5 * tmp2 * tmp2) if t == "pent_plan_max": qsptheta[i][j][kc] += gaussthetak[i] norms[i][j][kc] += 1 elif t == "bcc" and j < k: if thetak >= self._params[i][]: tmp = self._params[i][] * ( thetak * ipi - 1.0) qsptheta[i][j][kc] += ( self._params[i][] * exp(-0.5 * tmp * tmp)) norms[i][j][kc] += self._params[i][] elif t == "sq_face_cap_trig_pris": if thetak < self._params[i][]: tmp = self._params[i][] * ( thetak * ipi - self._params[i][ ]) qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp) norms[i][j][kc] += 1 for m in range(nneigh): if (m != j) and (m != k) and (not flag_xaxis): tmp = max( -1.0, min(np.inner(zaxis, rijnorm[m]), 1.0)) thetam = acos(tmp) xtwoaxistmp = gramschmidt(rijnorm[m], zaxis) l = np.linalg.norm(xtwoaxistmp) if l < very_small: flag_xtwoaxis = True else: xtwoaxis = xtwoaxistmp / l phi = acos(max( -1.0, min(np.inner(xtwoaxis, xaxis), 1.0))) flag_xtwoaxis = False if self._comp_azi: phi2 = atan2( np.dot(xtwoaxis, yaxis), np.dot(xtwoaxis, xaxis)) if t in ["tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr", "oct_max", "sq_plan_max", "hex_plan_max", "see_saw_rect"]: if thetam >= self._params[i][]: tmp = self._params[i][] * ( thetam * ipi - 1.0) qsptheta[i][j][kc] += exp( -0.5 * tmp * tmp) norms[i][j][kc] += 1 if not flag_xaxis and not flag_xtwoaxis: for i, t in enumerate(self._types): if t in ["tri_plan", "tri_plan_max", "tet", "tet_max"]: tmp = self._params[i][] * ( thetam * ipi - self._params[i][]) tmp2 = cos( self._params[i][] * phi) ** self._params[i][ ] tmp3 = 1 if t in ["tri_plan_max", "tet_max"] \ else gaussthetak[i] qsptheta[i][j][kc] += tmp3 * exp( -0.5 * tmp * tmp) * tmp2 norms[i][j][kc] += 1 elif t in ["pent_plan", "pent_plan_max"]: tmp = 0.4 if thetam <= \ self._params[i][ ] * pi \ else 0.8 tmp2 = self._params[i][] * ( thetam * ipi - tmp) tmp3 = cos(phi) tmp4 = 1 if t == "pent_plan_max" \ else gaussthetak[i] qsptheta[i][j][kc] += tmp4 * exp( -0.5 * tmp2 * tmp2) * tmp3 * tmp3 norms[i][j][kc] += 1 elif t in ["T", "tri_pyr", "sq_pyr", "pent_pyr", "hex_pyr"]: tmp = cos( self._params[i][] * phi) ** self._params[i][ ] tmp3 = 
self._params[i][] * ( thetam * ipi - 0.5) qsptheta[i][j][kc] += tmp * exp( -0.5 * tmp3 * tmp3) norms[i][j][kc] += 1 elif t in ["sq_plan", "oct", "oct_legacy"]: if thetak < self._params[i][ ] and \ thetam < self._params[i][ ]: tmp = cos( self._params[i][] * phi) ** self._params[i][ ] tmp2 = self._params[i][ ] * ( thetam * ipi - 0.5) qsptheta[i][j][kc] += tmp * exp( -0.5 * tmp2 * tmp2) if t == "oct_legacy": qsptheta[i][j][kc] -= tmp * \ self._params[ i][ 6] * \ self._params[ i][ 7] norms[i][j][kc] += 1 elif t in ["tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr", "oct_max", "sq_plan_max", "hex_plan_max"]: if thetam < self._params[i][ ]: if thetak < self._params[i][ ]: tmp = cos(self._params[i][ ] * phi) ** \ self._params[i][ ] tmp2 = self._params[i][ ] * ( thetam * ipi - 0.5) if t != "hex_plan_max" else \ self._params[i][ ] * ( fabs( thetam * ipi - 0.5) - self._params[i][ ]) qsptheta[i][j][ kc] += tmp * exp( -0.5 * tmp2 * tmp2) norms[i][j][kc] += 1 elif t == "bcc" and j < k: if thetak < self._params[i][ ]: if thetak > piover2: fac = 1.0 else: fac = -1.0 tmp = (thetam - piover2) / asin( 1 / 3) qsptheta[i][j][kc] += fac * cos( 3.0 * phi) * fac_bcc * \ tmp * exp( -0.5 * tmp * tmp) norms[i][j][kc] += 1 elif t == "see_saw_rect": if thetam < self._params[i][ ]: if thetak < self._params[i][ ] and phi < 0.75 * pi: tmp = cos(self._params[i][ ] * phi) ** \ self._params[i][ ] tmp2 = self._params[i][ ] * ( thetam * ipi - 0.5) qsptheta[i][j][kc] += tmp * \ exp( -0.5 * tmp2 * tmp2) norms[i][j][kc] += 1.0 elif t in ["cuboct", "cuboct_max"]: if thetam < self._params[i][ ] and \ thetak > self._params[i][ 4] and \ thetak < self._params[i][2]: if thetam > self._params[i][ 4] and \ thetam < \ self._params[i][2]: tmp = cos(phi) tmp2 = self._params[i][ 5] * ( thetam * ipi - 0.5) qsptheta[i][j][ kc] += tmp * tmp * exp( -0.5 * tmp2 * tmp2) norms[i][j][kc] += 1.0 elif thetam < self._params[i][ 4]: tmp = 0.0556 * (cos( phi - 0.5 * pi) - 0.81649658) tmp2 = self._params[i][ 6] * ( thetam * ipi - onethird) qsptheta[i][j][kc] += exp( -0.5 * tmp * tmp) * \ exp( -0.5 * tmp2 * tmp2) norms[i][j][kc] += 1.0 elif thetam > self._params[i][ 2]: tmp = 0.0556 * (cos( phi - 0.5 * pi) - \ 0.81649658) tmp2 = self._params[i][ 6] * ( thetam * ipi - \ twothird) qsptheta[i][j][kc] += exp( -0.5 * tmp * tmp) * \ exp( -0.5 * tmp2 * tmp2) norms[i][j][kc] += 1.0 elif t == "sq_face_cap_trig_pris" and not flag_yaxis: if thetak < self._params[i][]: if thetam < self._params[i][ ]: tmp = cos(self._params[i][ ] * \ phi2) ** \ self._params[i][ ] tmp2 = self._params[i][ ] * ( thetam * ipi - self._params[ i][ ]) else: tmp = cos(self._params[i][ ] * \ (phi2 + self._params[i][ ])) ** \ self._params[i][ ] tmp2 = self._params[i][ ] * ( thetam * ipi - self._params[ i][ ]) qsptheta[i][j][kc] += tmp * exp( -0.5 * tmp2 * tmp2) norms[i][j][kc] += 1 kc += 1 for i, t in enumerate(self._types): if t in ["tri_plan", "tet", "bent", "sq_plan", "oct", "oct_legacy", "cuboct", "pent_plan"]: ops[i] = tmp_norm = 0.0 for j in range(nneigh): ops[i] += sum(qsptheta[i][j]) tmp_norm += float(sum(norms[i][j])) ops[i] = ops[i] / tmp_norm if tmp_norm > 1.0e-12 else None elif t in ["T", "tri_pyr", "see_saw_rect", "sq_pyr", "tri_bipyr", "sq_bipyr", "pent_pyr", "hex_pyr", "pent_bipyr", "hex_bipyr", "oct_max", "tri_plan_max", "tet_max", "sq_plan_max", "pent_plan_max", "cuboct_max", "hex_plan_max", "sq_face_cap_trig_pris"]: ops[i] = None if nneigh > 1: for j in range(nneigh): for k in range(len(qsptheta[i][j])): qsptheta[i][j][k] = qsptheta[i][j][k] / \ norms[i][j][k] \ if 
norms[i][j][k] > 1.0e-12 else 0.0 ops[i] = max(qsptheta[i][j]) if j == 0 \ else max(ops[i], max(qsptheta[i][j])) elif t == "bcc": ops[i] = 0.0 for j in range(nneigh): ops[i] += sum(qsptheta[i][j]) ops[i] = ops[i] / float(0.5 * float( nneigh * (6 + (nneigh - 2) * (nneigh - 3)))) \ if nneigh > 3 else None elif t == "sq_pyr_legacy": if nneigh > 1: dmean = np.mean(dist) acc = 0.0 for d in dist: tmp = self._params[i][2] * (d - dmean) acc = acc + exp(-0.5 * tmp * tmp) for j in range(nneigh): ops[i] = max(qsptheta[i][j]) if j == 0 \ else max(ops[i], max(qsptheta[i][j])) ops[i] = acc * ops[i] / float(nneigh) else: ops[i] = None if self._geomops2: aij = [] for ir, r in enumerate(rijnorm): for j in range(ir + 1, len(rijnorm)): aij.append( acos(max(-1.0, min(np.inner(r, rijnorm[j]), 1.0)))) aijs = sorted(aij) neighscent = np.array([0.0, 0.0, 0.0]) for j, neigh in enumerate(neighsites): neighscent = neighscent + neigh.coords if nneigh > 0: neighscent = (neighscent / float(nneigh)) h = np.linalg.norm(neighscent - centvec) b = min(distjk_unique) if len(distjk_unique) > 0 else 0 dhalf = max(distjk_unique) / 2.0 if len(distjk_unique) > 0 else 0 for i, t in enumerate(self._types): if t == "reg_tri" or t == "sq": if nneigh < 3: ops[i] = None else: ops[i] = 1.0 if t == "reg_tri": a = 2.0 * asin(b / (2.0 * sqrt(h * h + (b / ( 2.0 * cos(3.0 * pi / 18.0))) ** 2.0))) nmax = 3 elif t == "sq": a = 2.0 * asin( b / (2.0 * sqrt(h * h + dhalf * dhalf))) nmax = 4 for j in range(min([nneigh, nmax])): ops[i] = ops[i] * exp(-0.5 * (( aijs[j] - a) * self._params[i][ 0]) ** 2) return ops
Compute all order parameters of site n.

Args:
    structure (Structure): input structure.
    n (int): index of site in input structure, for which OPs are to be calculated. Note that we do not use the sites iterator here, but directly access sites via struct[index].
    indices_neighs ([int]): list of indices of those neighbors in Structure object structure that are to be considered for OP computation. This optional argument overwrites the way neighbors are to be determined as defined in the constructor (i.e., Voronoi coordination finder via negative cutoff radius vs constant cutoff radius if cutoff was positive). We do not use information about the underlying structure lattice if the neighbor indices are explicitly provided. This has two important consequences. First, the input Structure object can, in fact, be a simple list of Site objects. Second, no nearest images of neighbors are determined when providing an index list. Note furthermore that this neighbor determination type ignores the optional target_spec argument.
    tol (float): threshold of weight (= solid angle / maximal solid angle) to determine if a particular pair is considered neighbors; this is relevant only in the case when Voronoi polyhedra are used to determine coordination.
    target_spec (Specie): target species to be considered when calculating the order parameters of site n; None includes all species of input structure.

Returns:
    [floats]: representing order parameters. Should it not be possible to compute a given OP for a conceptual reason, the corresponding entry is None instead of a float. For Steinhardt et al.'s bond orientational OPs and the other geometric OPs ("tet", "oct", "bcc", etc.), this can happen if there is a single neighbor around site n in the structure because that does not permit calculation of angles between multiple neighbors.
378,404
def filter(cls, filters, iterable):
    if isinstance(filters, Filter):
        filters = [filters]
    for filter in filters:
        iterable = filter.generator(iterable)
    return iterable
Returns the elements in `iterable` that pass given `filters`
378,405
def split_unescape(s, delim, escape='\\', unescape=True):
    # NOTE: the default escape character is assumed to be a backslash;
    # the original default literal was lost in extraction.
    ret = []
    current = []
    itr = iter(s)
    for ch in itr:
        if ch == escape:
            try:
                if not unescape:
                    current.append(escape)
                current.append(next(itr))
            except StopIteration:
                if unescape:
                    current.append(escape)
        elif ch == delim:
            ret.append(''.join(current))
            current = []
        else:
            current.append(ch)
    ret.append(''.join(current))
    return ret
>>> split_unescape('foo,bar', ',')
['foo', 'bar']
>>> split_unescape('foo$,bar', ',', '$')
['foo,bar']
>>> split_unescape('foo$$,bar', ',', '$', unescape=True)
['foo$', 'bar']
>>> split_unescape('foo$$,bar', ',', '$', unescape=False)
['foo$$', 'bar']
>>> split_unescape('foo$', ',', '$', unescape=True)
['foo$']
378,406
def remotesByConnected(self):
    conns, disconns = [], []
    for r in self.remotes.values():
        array = conns if self.isRemoteConnected(r) else disconns
        array.append(r)
    return conns, disconns
Partitions the remotes into connected and disconnected :return: tuple(connected remotes, disconnected remotes)
378,407
def versions(self): print % (color.LightBlue, self.version, color.Normal) print self.workbench.help()
Announce Versions of CLI and Server Args: None Returns: The running versions of both the CLI and the Workbench Server
378,408
def __create_dynamic_connections(self):
    if (self._stimulus is None):
        raise NameError("Stimulus should initialed before creation of the dynamic connections in the network.");

    self._dynamic_coupling = [ [0] * self._num_osc for i in range(self._num_osc)];

    for i in range(self._num_osc):
        neighbors = self.get_neighbors(i);

        if ( (len(neighbors) > 0) and (self._stimulus[i] > 0) ):
            number_stimulated_neighbors = 0.0;
            for j in neighbors:
                if (self._stimulus[j] > 0):
                    number_stimulated_neighbors += 1.0;

            if (number_stimulated_neighbors > 0):
                dynamic_weight = self._params.Wt / number_stimulated_neighbors;

                for j in neighbors:
                    self._dynamic_coupling[i][j] = dynamic_weight;
! @brief Create dynamic connection in line with input stimulus.
378,409
def _call_scope(self, scope, *args, **kwargs):
    result = getattr(self._model, scope)(self, *args, **kwargs)
    return result or self
Call the given model scope. :param scope: The scope to call :type scope: str
378,410
def deps_from_pyp_format(requires, runtime=True):
    parsed = []
    logger.debug("Dependencies from setup.py: {0} runtime: {1}.".format(
        requires, runtime))
    for req in requires:
        try:
            parsed.append(Requirement.parse(req))
        except ValueError:
            logger.warn("Unparsable dependency {0}.".format(req), exc_info=True)
    in_rpm_format = []
    for dep in parsed:
        in_rpm_format.extend(dependency_to_rpm(dep, runtime))
    logger.debug("Dependencies from setup.py in rpm format: {0}.".format(
        in_rpm_format))
    return in_rpm_format
Parses dependencies extracted from setup.py. Args: requires: list of dependencies as written in setup.py of the package. runtime: are the dependencies runtime (True) or build time (False)? Returns: List of semi-SPECFILE dependencies (see dependency_to_rpm for format).
378,411
def function(self, x, y, amp, R_sersic, n_sersic, e1, e2, center_x=0, center_y=0): R_sersic = np.maximum(0, R_sersic) phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x y_shift = y - center_y cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) xt1 = cos_phi*x_shift+sin_phi*y_shift xt2 = -sin_phi*x_shift+cos_phi*y_shift xt2difq2 = xt2/(q*q) R_ = np.sqrt(xt1*xt1+xt2*xt2difq2) if isinstance(R_, int) or isinstance(R_, float): R_ = max(self._smoothing, R_) else: R_[R_ < self._smoothing] = self._smoothing k, bn = self.k_bn(n_sersic, R_sersic) R_frac = R_/R_sersic R_frac = R_frac.astype(np.float32) if isinstance(R_, int) or isinstance(R_, float): if R_frac > 100: result = 0 else: exponent = -bn*(R_frac**(1./n_sersic)-1.) result = amp * np.exp(exponent) else: R_frac_real = R_frac[R_frac <= 100] exponent = -bn*(R_frac_real**(1./n_sersic)-1.) result = np.zeros_like(R_) result[R_frac <= 100] = amp * np.exp(exponent) return np.nan_to_num(result)
returns Sersic profile
378,412
def get_pixel(self, x: int, y: int) -> Tuple[int, int, int]:
    color = lib.TCOD_image_get_pixel(self.image_c, x, y)
    return color.r, color.g, color.b
Get the color of a pixel in this Image. Args: x (int): X pixel of the Image. Starting from the left at 0. y (int): Y pixel of the Image. Starting from the top at 0. Returns: Tuple[int, int, int]: An (r, g, b) tuple containing the pixels color value. Values are in a 0 to 255 range.
378,413
def add(self, interval):
    if interval in self:
        return
    if interval.is_null():
        raise ValueError(
            "IntervalTree: Null Interval objects not allowed in IntervalTree:"
            " {0}".format(interval)
        )
    if not self.top_node:
        self.top_node = Node.from_interval(interval)
    else:
        self.top_node = self.top_node.add(interval)
    self.all_intervals.add(interval)
    self._add_boundaries(interval)
Adds an interval to the tree, if not already present. Completes in O(log n) time.
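A short usage sketch for this method. The `Node.from_interval` and `all_intervals` references suggest this is `IntervalTree.add` from the `intervaltree` package, but that is an assumption:

from intervaltree import Interval, IntervalTree  # assumed package providing this class

tree = IntervalTree()
tree.add(Interval(1, 5, "a"))   # inserted in O(log n)
tree.add(Interval(1, 5, "a"))   # duplicate: already present, no-op
print(sorted(tree))             # [Interval(1, 5, 'a')]
# A null interval (begin >= end) is rejected with ValueError:
# tree.add(Interval(3, 3))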
378,414
def sanitize_for_archive(url, headers, payload):
    if MeetupClient.PKEY in payload:
        payload.pop(MeetupClient.PKEY)
    if MeetupClient.PSIGN in payload:
        payload.pop(MeetupClient.PSIGN)
    return url, headers, payload
Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload
378,415
def save_config(self, cmd="write memory", confirm=False, confirm_response=""):
    return super(UbiquitiEdgeSSH, self).save_config(
        cmd=cmd, confirm=confirm, confirm_response=confirm_response
    )
Saves configuration.
378,416
def get_grade_system_form(self, *args, **kwargs): if isinstance(args[-1], list) or in kwargs: return self.get_grade_system_form_for_create(*args, **kwargs) else: return self.get_grade_system_form_for_update(*args, **kwargs)
Pass through to provider GradeSystemAdminSession.get_grade_system_form_for_update
378,417
def AnalizarXml(self, xml=""): "Analiza un mensaje XML (por defecto el ticket de acceso)" try: if not xml or xml==: xml = self.XmlResponse elif xml==: xml = self.XmlRequest self.xml = SimpleXMLElement(xml) return True except Exception, e: self.Excepcion = traceback.format_exception_only( sys.exc_type, sys.exc_value)[0] return False
Analiza un mensaje XML (por defecto el ticket de acceso)
378,418
def delete_mount_cache(real_name): * cache = salt.utils.mount.read_cache(__opts__) if cache: if in cache: if real_name in cache[]: del cache[][real_name] cache_write = salt.utils.mount.write_cache(cache, __opts__) if not cache_write: raise CommandExecutionError() return True
.. versionadded:: 2018.3.0 Provide information if the path is mounted CLI Example: .. code-block:: bash salt '*' mount.delete_mount_cache /mnt/share
378,419
def infer_sedes(obj): if is_sedes(obj.__class__): return obj.__class__ elif not isinstance(obj, bool) and isinstance(obj, int) and obj >= 0: return big_endian_int elif BinaryClass.is_valid_type(obj): return binary elif not isinstance(obj, str) and isinstance(obj, collections.Sequence): return List(map(infer_sedes, obj)) elif isinstance(obj, bool): return boolean elif isinstance(obj, str): return text msg = .format(type(obj).__name__) raise TypeError(msg)
Try to find a sedes objects suitable for a given Python object. The sedes objects considered are `obj`'s class, `big_endian_int` and `binary`. If `obj` is a sequence, a :class:`rlp.sedes.List` will be constructed recursively. :param obj: the python object for which to find a sedes object :raises: :exc:`TypeError` if no appropriate sedes could be found
378,420
def teardown_cluster(config_file, yes, workers_only, override_cluster_name): config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name validate_config(config) config = fillout_defaults(config) confirm("This will destroy your cluster", yes) provider = get_node_provider(config["provider"], config["cluster_name"]) try: def remaining_nodes(): if workers_only: A = [] else: A = [ node_id for node_id in provider.non_terminated_nodes({ TAG_RAY_NODE_TYPE: "head" }) ] A += [ node_id for node_id in provider.non_terminated_nodes({ TAG_RAY_NODE_TYPE: "worker" }) ] return A A = remaining_nodes() with LogTimer("teardown_cluster: Termination done."): while A: logger.info("teardown_cluster: " "Terminating {} nodes...".format(len(A))) provider.terminate_nodes(A) time.sleep(1) A = remaining_nodes() finally: provider.cleanup()
Destroys all nodes of a Ray cluster described by a config json.
378,421
def freeze(obj):
    if isinstance(obj, collections.Mapping):
        return FrozenDict({freeze(k): freeze(v) for k, v in six.iteritems(obj)})
    elif isinstance(obj, list):
        return FrozenList([freeze(e) for e in obj])
    else:
        return obj
Transform tree of dict and list in read-only data structure. dict instances are transformed to FrozenDict, lists in FrozenList.
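The `FrozenDict`/`FrozenList` classes are project-specific, so as a standalone illustration of the same recursion, here is a sketch that substitutes `MappingProxyType` and `tuple` as the read-only containers (stand-ins, not the original classes):

import collections.abc
from types import MappingProxyType

def freeze_sketch(obj):
    # Same recursion as above, with stand-in read-only containers.
    if isinstance(obj, collections.abc.Mapping):
        return MappingProxyType({freeze_sketch(k): freeze_sketch(v) for k, v in obj.items()})
    elif isinstance(obj, list):
        return tuple(freeze_sketch(e) for e in obj)
    return obj

frozen = freeze_sketch({"hosts": ["a", "b"], "retries": 3})
print(frozen["hosts"])       # ('a', 'b')
# frozen["retries"] = 5      # would raise TypeError: read-only mapping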
378,422
def lucas_gas(T, Tc, Pc, Zc, MW, dipole=0, CASRN=None): r Tr = T/Tc xi = 0.176*(Tc/MW**3/(Pc/1E5)**4)**(1/6.) if dipole is None: dipole = 0 dipoler = 52.46*dipole**2*(Pc/1E5)/Tc**2 if dipoler < 0.022: Fp = 1 elif 0.022 <= dipoler < 0.075: Fp = 1 + 30.55*(0.292 - Zc)**1.72 else: Fp = 1 + 30.55*(0.292 - Zc)**1.72*abs(0.96 + 0.1*(Tr-0.7)) if CASRN and CASRN in _lucas_Q_dict: Q = _lucas_Q_dict[CASRN] if Tr - 12 > 0: value = 1 else: value = -1 FQ = 1.22*Q**0.15*(1 + 0.00385*((Tr-12)**2)**(1./MW)*value) else: FQ = 1 eta = (0.807*Tr**0.618 - 0.357*exp(-0.449*Tr) + 0.340*exp(-4.058*Tr) + 0.018)*Fp*FQ/xi return eta/1E7
r'''Estimate the viscosity of a gas using an empirical formula developed in several sources, but as discussed in [1]_ as the original sources are in German or merely personal communications with the authors of [1]_.

.. math::
    \eta = \left[0.807T_r^{0.618}-0.357\exp(-0.449T_r) + 0.340\exp(-4.058 T_r) + 0.018\right]F_p^\circ F_Q^\circ /\xi

    F_p^\circ = 1, \qquad 0 \le \mu_{r} < 0.022

    F_p^\circ = 1+30.55(0.292-Z_c)^{1.72}, \qquad 0.022 \le \mu_{r} < 0.075

    F_p^\circ = 1+30.55(0.292-Z_c)^{1.72}|0.96+0.1(T_r-0.7)|, \qquad 0.075 < \mu_{r}

    F_Q^\circ = 1.22Q^{0.15}\left\{ 1+0.00385[(T_r-12)^2]^{1/M}\text{sign}(T_r-12)\right\}

    \mu_r = 52.46 \frac{\mu^2 P_c}{T_c^2}

    \xi = 0.176\left(\frac{T_c}{MW^3 P_c^4}\right)^{1/6}

Parameters
----------
T : float
    Temperature of fluid [K]
Tc : float
    Critical point of fluid [K]
Pc : float
    Critical pressure of the fluid [Pa]
Zc : float
    Critical compressibility of the fluid [Pa]
dipole : float
    Dipole moment of fluid [debye]
CASRN : str, optional
    CAS of the fluid

Returns
-------
mu_g : float
    Viscosity of gas, [Pa*s]

Notes
-----
The example is from [1]_; all results agree. Viscosity is calculated in micropoise, and converted to SI internally (1E-7). Q for He = 1.38; Q for H2 = 0.76; Q for D2 = 0.52.

Examples
--------
>>> lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)
1.7822676912698928e-05

References
----------
.. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E. Properties of Gases and Liquids. McGraw-Hill Companies, 1987.
378,423
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'")
    try:
        import gzip
        gzip.GzipFile
    except (ImportError, AttributeError):
        raise CompressionError("gzip module is not available")
    extfileobj = fileobj is not None
    try:
        fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except IOError:
        if not extfileobj and fileobj is not None:
            fileobj.close()
        if fileobj is None:
            raise
        raise ReadError("not a gzip file")
    except:
        if not extfileobj and fileobj is not None:
            fileobj.close()
        raise
    t._extfileobj = extfileobj
    return t
Open gzip compressed tar archive name for reading or writing. Appending is not allowed.
378,424
def gdaOnes(shape, dtype, numGhosts=1):
    res = GhostedDistArray(shape, dtype)
    res.setNumberOfGhosts(numGhosts)
    res[:] = 1
    return res
ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
378,425
def _to_dict(self):
    _dict = {}
    if hasattr(self, 'text') and self.text is not None:
        _dict['text'] = self.text
    if hasattr(self, 'score') and self.score is not None:
        _dict['score'] = self.score
    return _dict
Return a json dictionary representing this model.
378,426
def update_one_time_key_counts(self, counts): self.one_time_keys_manager.server_counts = counts if self.one_time_keys_manager.should_upload(): logger.info() self.upload_one_time_keys()
Update data on one-time keys count and upload new ones if necessary. Args: counts (dict): Counts of keys currently on the HS for each key type.
378,427
def export(export_path, vocabulary, embeddings, num_oov_buckets, preprocess_text): tmpdir = tempfile.mkdtemp() vocabulary_file = os.path.join(tmpdir, "tokens.txt") with tf.gfile.GFile(vocabulary_file, "w") as f: f.write("\n".join(vocabulary)) vocab_size = len(vocabulary) embeddings_dim = embeddings.shape[1] spec = make_module_spec(vocabulary_file, vocab_size, embeddings_dim, num_oov_buckets, preprocess_text) try: with tf.Graph().as_default(): m = hub.Module(spec) p_embeddings = tf.placeholder(tf.float32) load_embeddings = tf.assign(m.variable_map[EMBEDDINGS_VAR_NAME], p_embeddings) with tf.Session() as sess: sess.run([load_embeddings], feed_dict={p_embeddings: embeddings}) m.export(export_path, sess) finally: shutil.rmtree(tmpdir)
Exports a TF-Hub module that performs embedding lookups. Args: export_path: Location to export the module. vocabulary: List of the N tokens in the vocabulary. embeddings: Numpy array of shape [N+K,M] the first N rows are the M dimensional embeddings for the respective tokens and the next K rows are for the K out-of-vocabulary buckets. num_oov_buckets: How many out-of-vocabulary buckets to add. preprocess_text: Whether to preprocess the input tensor by removing punctuation and splitting on spaces.
378,428
def add_data(self, data, table, delimiter=, bands=, clean_up=True, rename_columns={}, column_fill={}, verbose=False): entry, del_records = data, [] if isinstance(data, str) and os.path.isfile(data): data = ii.read(data, delimiter=delimiter) elif isinstance(data, (list, tuple, np.ndarray)): data = ii.read([.join(map(str, row)) for row in data], data_start=1, delimiter=) elif isinstance(data, pd.core.frame.DataFrame): data = at.Table.from_pandas(data)
Adds data to the specified database table. Column names must match table fields to insert, however order and completeness don't matter. Parameters ---------- data: str, array-like, astropy.table.Table The path to an ascii file, array-like object, or table. The first row or element must be the list of column names table: str The name of the table into which the data should be inserted delimiter: str The string to use as the delimiter when parsing the ascii file bands: sequence Sequence of band to look for in the data header when digesting columns of multiple photometric measurements (e.g. ['MKO_J','MKO_H','MKO_K']) into individual rows of data for database insertion clean_up: bool Run self.clean_up() rename_columns: dict A dictionary of the {input_col_name:desired_col_name} for table columns, e.g. {'e_Jmag':'J_unc', 'RAJ2000':'ra'} column_fill: dict A dictionary of the column name and value to fill, e.g. {'instrument_id':2, 'band':'2MASS.J'} verbose: bool Print diagnostic messages
378,429
def new_with_array(self, array):
    arguments = vars(self)
    arguments.update({"array": array})
    if "centre" in arguments:
        arguments.pop("centre")
    return self.__class__(**arguments)
Parameters ---------- array: ndarray An ndarray Returns ------- new_array: ScaledSquarePixelArray A new instance of this class that shares all of this instances attributes with a new ndarray.
378,430
def _read_requirements(metadata, extras): extras = extras or () requirements = [] for entry in metadata.run_requires: if isinstance(entry, six.text_type): entry = {"requires": [entry]} extra = None else: extra = entry.get("extra") if extra is not None and extra not in extras: continue for line in entry.get("requires", []): r = requirementslib.Requirement.from_line(line) if r.markers: contained = get_contained_extras(r.markers) if (contained and not any(e in contained for e in extras)): continue marker = get_without_extra(r.markers) r.markers = str(marker) if marker else None line = r.as_line(include_hashes=False) requirements.append(line) return requirements
Read wheel metadata to know what it depends on. The `run_requires` attribute contains a list of dict or str specifying requirements. For dicts, it may contain an "extra" key to specify these requirements are for a specific extra. Unfortunately, not all fields are specified like this (I don't know why); some are specified with markers. So we jump through these terrible hoops to know exactly what we need. The extra extraction is not comprehensive. It assumes the marker is NEVER something like `extra == "foo" and extra == "bar"`. I guess this never makes sense anyway? Markers are just terrible.
378,431
def business_match_query(self, **kwargs): if not kwargs.get(): raise ValueError() if not kwargs.get(): raise ValueError() if not kwargs.get(): raise ValueError() if not kwargs.get(): raise ValueError() if not kwargs.get(): raise ValueError() return self._query(BUSINESS_MATCH_API_URL, **kwargs)
Query the Yelp Business Match API. documentation: https://www.yelp.com/developers/documentation/v3/business_match required parameters: * name - business name * city * state * country * address1 NOTE: `match_type` is deprecated since April 1, 2019.
378,432
def return_multiple_convert_numpy_base(dbpath, folder_path, set_object, start_id, end_id, converter, add_args=None): engine = create_engine( + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(start_id) if add_args is None: converted = converter(join(folder_path, tmp_object.path)) else: converted = converter(join(folder_path, tmp_object.path), add_args) if len(converted.shape) == 0: columns_amt = 1 else: columns_amt = converted.shape[0] return_array = np.zeros([end_id - start_id + 1, columns_amt]) for i in xrange(end_id - start_id + 1): tmp_object = session.query(set_object).get(start_id + i) if add_args is None: return_array[i, :] = converter(join(folder_path, tmp_object.path)) else: return_array[i, :] = converter(join(folder_path, tmp_object.path), add_args) session.close() return return_array
Generic function which converts several objects, with ids in the range (start_id, end_id) into a 2d numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database start_id : the id of the first object to be converted end_id : the id of the last object to be converted converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : 2-dimensional ndarray
378,433
def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
    self.load_rules()
    if isinstance(rule, checks.BaseCheck):
        result = rule(target, creds, self, rule)
    elif not self.rules:
        result = False
    if self.raise_error and not result:
        if exc:
            raise exc(*args, **kwargs)
        else:
            raise PolicyNotAuthorized(rule, target, creds)
    return result
Checks authorization of a rule against the target and credentials.
378,434
def _regex_flags_from_bits(self, bits): flags = return .join(flags[i - 1] if (1 << i) & bits else for i in range(1, len(flags) + 1))
Return the textual equivalent of numerically encoded regex flags.
378,435
def commit_input_persist_id(self, **kwargs): config = ET.Element("config") commit = ET.Element("commit") config = commit input = ET.SubElement(commit, "input") persist_id = ET.SubElement(input, "persist-id") persist_id.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
378,436
def cli(env, sortby, cpu, domain, datacenter, hostname, memory, network, hourly, monthly, tag, columns, limit):
    vsi = SoftLayer.VSManager(env.client)
    guests = vsi.list_instances(hourly=hourly,
                                monthly=monthly,
                                hostname=hostname,
                                domain=domain,
                                cpus=cpu,
                                memory=memory,
                                datacenter=datacenter,
                                nic_speed=network,
                                tags=tag,
                                mask=columns.mask(),
                                limit=limit)
    table = formatting.Table(columns.columns)
    table.sortby = sortby
    for guest in guests:
        table.add_row([value or formatting.blank()
                       for value in columns.row(guest)])
    env.fout(table)
List virtual servers.
378,437
def reload_core(host=None, core_name=None): hostsuccessdataerrorswarnings*successdataerrorswarnings ret = _get_return_dict() if not _check_for_cores(): err = [] return ret.update({: False, : err}) if _get_none_or_value(core_name) is None and _check_for_cores(): success = True for name in __opts__[]: resp = reload_core(host, name) if not resp[]: success = False data = {name: {: resp[]}} ret = _update_return_dict(ret, success, data, resp[], resp[]) return ret extra = [, .format(core_name)] url = _format_url(, host=host, core_name=None, extra=extra) return _http_request(url)
MULTI-CORE HOSTS ONLY Load a new core from the same configuration as an existing registered core. While the "new" core is initializing, the "old" one will continue to accept requests. Once it has finished, all new request will go to the "new" core, and the "old" core will be unloaded. host : str (None) The solr host to query. __opts__['host'] is default. core_name : str The name of the core to reload Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.reload_core None music Return data is in the following format:: {'success':bool, 'data':dict, 'errors':list, 'warnings':list}
378,438
def grouping_val(self):
    grouping = self.grouping
    if grouping is None:
        return ST_Grouping.CLUSTERED
    val = grouping.val
    if val is None:
        return ST_Grouping.CLUSTERED
    return val
Return the value of the ``./c:grouping{val=?}`` attribute, taking defaults into account when items are not present.
378,439
def get_social_accounts(user):
    accounts = {}
    for account in user.socialaccount_set.all().iterator():
        providers = accounts.setdefault(account.provider, [])
        providers.append(account)
    return accounts
{% get_social_accounts user as accounts %} Then: {{accounts.twitter}} -- a list of connected Twitter accounts {{accounts.twitter.0}} -- the first Twitter account {% if accounts %} -- if there is at least one social account
378,440
def pages(self):
    pages = []
    for har_dict in self.har_data:
        har_parser = HarParser(har_data=har_dict)
        if self.page_id:
            for page in har_parser.pages:
                if page.page_id == self.page_id:
                    pages.append(page)
        else:
            pages = pages + har_parser.pages
    return pages
The aggregate pages of all the parser objects.
378,441
def __netjson_channel_width(self, radio): htmode = radio.pop() if htmode == : return 20 channel_width = htmode.replace(, ).replace(, ) if in channel_width or in channel_width: radio[] = htmode channel_width = channel_width[0:-1] return int(channel_width)
determines NetJSON channel_width radio attribute
378,442
def update(self): if self.input_method == : if self._thread is None: thread_is_running = False else: thread_is_running = self._thread.isAlive() if self.timer_ports.finished() and not thread_is_running: self._thread = ThreadScanner(self.stats) self._thread.start() if len(self.stats) > 0: self.timer_ports = Timer(self.stats[0][]) else: self.timer_ports = Timer(0) else: pass return self.stats
Update the ports list.
378,443
def unwrap(tensor):
    while isinstance(tensor, (PrettyTensor, Loss)):
        tensor = tensor.tensor
    return tensor
Returns the underlying tensor if tensor is wrapped or tensor. Args: tensor: The tensor to unwrap. Returns: Tensor or if it is a pretty tensor, the unwrapped version. Raises: ValueError: if tensor holds a sequence.
378,444
def structure_to_abivars(structure, **kwargs): if not structure.is_ordered: raise ValueError() types_of_specie = structure.types_of_specie natom = structure.num_sites znucl_type = [specie.number for specie in types_of_specie] znucl_atoms = structure.atomic_numbers typat = np.zeros(natom, np.int) for atm_idx, site in enumerate(structure): typat[atm_idx] = types_of_specie.index(site.specie) + 1 rprim = ArrayWithUnit(structure.lattice.matrix, "ang").to("bohr") angdeg = structure.lattice.angles xred = np.reshape([site.frac_coords for site in structure], (-1, 3)) rprim = np.where(np.abs(rprim) > 1e-8, rprim, 0.0) xred = np.where(np.abs(xred) > 1e-8, xred, 0.0) d = dict( natom=natom, ntypat=len(types_of_specie), typat=typat, znucl=znucl_type, xred=xred, ) geomode = kwargs.pop("geomode", "rprim") if geomode == "automatic": geomode = "rprim" if structure.lattice.is_hexagonal: geomode = "angdeg" angdeg = structure.lattice.angles if geomode == "rprim": d.update( acell=3 * [1.0], rprim=rprim, ) elif geomode == "angdeg": d.update( acell=ArrayWithUnit(structure.lattice.abc, "ang").to("bohr"), angdeg=angdeg, ) else: raise ValueError("Wrong value for geomode: %s" % geomode) return d
Receives a structure and returns a dictionary with the ABINIT variables.
378,445
def snmp_server_group_write(self, **kwargs): config = ET.Element("config") snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp") group = ET.SubElement(snmp_server, "group") group_name_key = ET.SubElement(group, "group-name") group_name_key.text = kwargs.pop() group_version_key = ET.SubElement(group, "group-version") group_version_key.text = kwargs.pop() write = ET.SubElement(group, "write") write.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
378,446
def controlled_by(self, *control_qubits: Qid) -> : from cirq.ops import ControlledOperation if control_qubits is None or len(control_qubits) is 0: raise ValueError( "Can't get controlled operation without control qubit. Op: {}" .format(repr(self))) else: return ControlledOperation(control_qubits, self)
Returns a controlled version of this operation. Args: control_qubits: Qubits to control the operation by. Required.
378,447
def addEvent(self, event, fd, action):
    self._events[event] = (fd, action)
Add a new win32 event to the event loop.
378,448
def as_dict(self, **extra):
    return [self._construct_email(email, **extra) for email in self.emails]
Converts all available emails to dictionaries. :return: List of dictionaries.
378,449
def remove(self, container, force=True, volumes=True):
    super().remove(container, force=force, v=volumes)
Remove a container. :param container: The container to remove. :param force: Whether to force the removal of the container, even if it is running. Note that this defaults to True, unlike the Docker default. :param volumes: Whether to remove any volumes that were created implicitly with this container, i.e. any volumes that were created due to ``VOLUME`` directives in the Dockerfile. External volumes that were manually created will not be removed. Note that this defaults to True, unlike the Docker default (where the equivalent parameter, ``v``, defaults to False).
378,450
def _write_widget(self, val):
    self._itsme = True
    try:
        setter = self._wid_info[self._wid][1]
        wtype = self._wid_info[self._wid][2]
        if setter:
            if wtype is not None:
                setter(self._wid, self._cast_value(val, wtype))
            else:
                setter(self._wid, val)
    finally:
        self._itsme = False
Writes value into the widget. If specified, user setter is invoked.
378,451
def delete_channel_cb(self, viewer, channel):
    chname = channel.name
    del self.name_dict[chname]
    un_hilite_set = set([])
    for path in self._hl_path:
        if path[0] == chname:
            un_hilite_set.add(path)
    self._hl_path -= un_hilite_set
    if self.gui_up:
        self.recreate_toc()
        self._rebuild_channels()
Called when a channel is deleted from the main interface. Parameter is a channel (a Channel object).
378,452
def set_flair(self, subreddit, item, flair_text=, flair_css_class=): data = {: six.text_type(subreddit), : flair_text or , : flair_css_class or } if isinstance(item, objects.Submission): data[] = item.fullname evict = item.permalink else: data[] = six.text_type(item) evict = self.config[].format( subreddit=six.text_type(subreddit)) response = self.request_json(self.config[], data=data) self.evict(evict) return response
Set flair for the user in the given subreddit. `item` can be a string, Redditor object, or Submission object. If `item` is a string it will be treated as the name of a Redditor. This method can only be called by a subreddit moderator with flair permissions. To set flair on yourself or your own links use :meth:`~praw.__init__.AuthenticatedReddit.select_flair`. :returns: The json response from the server.
378,453
def replace_blocks(self, blocks): start = 0 bulk_insert = self.bulk_insert blocks_len = len(blocks) select = query = \ execute = self.cursor.execute while start < blocks_len: rows = blocks[start:start+bulk_insert] params = [param for params in rows for param in params] insert = (select + ) * (len(rows) - 1) + select execute(query + insert, params) start += bulk_insert
Replace multiple blocks. blocks must be a list of tuples where each tuple consists of (namespace, offset, key, data, flags)
378,454
def install(self, goal=None, first=False, replace=False, before=None, after=None):
    goal = Goal.by_name(goal or self.name)
    goal.install(self, first, replace, before, after)
    return goal
Install the task in the specified goal (or a new goal with the same name as the task).

The placement of the task in the execution list of the goal defaults to the end but can be influenced by specifying exactly one of the following arguments:

:API: public
:param first: Places this task 1st in the goal's execution list.
:param replace: Replaces any existing tasks in the goal with this goal.
:param before: Places this task before the named task in the goal's execution list.
:param after: Places this task after the named task in the goal's execution list.
:returns: The goal with task installed.
:rtype: object
378,455
def update_PCA_box(self): if self.s in list(self.pmag_results_data[].keys()): if self.current_fit: tmin = self.current_fit.tmin tmax = self.current_fit.tmax calculation_type = self.current_fit.PCA_type else: calculation_type = self.PCA_type_box.GetValue() PCA_type = "None" if calculation_type == "DE-BFL": PCA_type = "line" elif calculation_type == "DE-BFL-A": PCA_type = "line-anchored" elif calculation_type == "DE-BFL-O": PCA_type = "line-with-origin" elif calculation_type == "DE-FM": PCA_type = "Fisher" elif calculation_type == "DE-BFP": PCA_type = "plane" else: print("no PCA type found setting to line") PCA_type = "line" self.PCA_type_box.SetStringSelection(PCA_type)
updates PCA box with current fit's PCA type
378,456
def set_source(self, propname, pores): r locs = self.tomask(pores=pores) if (not np.all(np.isnan(self[][locs]))) or \ (not np.all(np.isnan(self[][locs]))): raise Exception( + ) self[propname] = locs self.settings[].append(propname)
r""" Applies a given source term to the specified pores Parameters ---------- propname : string The property name of the source term model to be applied pores : array_like The pore indices where the source term should be applied Notes ----- Source terms cannot be applied in pores where boundary conditions have already been set. Attempting to do so will result in an error being raised.
378,457
def _update_limits_from_api(self): self.connect() logger.debug("Querying ELB DescribeAccountLimits for limits") attribs = self.conn.describe_account_limits() name_to_limits = { : , : , : } for attrib in attribs[]: if int(attrib.get(, 0)) == 0: continue name = attrib.get(, ) if name not in name_to_limits: continue self.limits[name_to_limits[name]]._set_api_limit(int(attrib[])) self.conn2 = client(, **self._boto3_connection_kwargs) logger.debug("Connected to %s in region %s", , self.conn2._client_config.region_name) logger.debug("Querying ELBv2 (ALB) DescribeAccountLimits for limits") attribs = self.conn2.describe_account_limits() name_to_limits = { : , : , : , : , : } for attrib in attribs[]: if int(attrib.get(, 0)) == 0: continue name = attrib.get(, ) if name not in name_to_limits: continue self.limits[name_to_limits[name]]._set_api_limit(int(attrib[])) logger.debug("Done setting limits from API")
Query ELB's DescribeAccountLimits API action, and update limits with the quotas returned. Updates ``self.limits``.
378,458
def _aggregrate_scores(its, tss, num_sentences):
    final = []
    for i, el in enumerate(its):
        for j, le in enumerate(tss):
            if el[2] == le[2]:
                assert el[1] == le[1]
                final.append((el[1], i + j, el[2]))
    _final = sorted(final, key=lambda tup: tup[1])[:num_sentences]
    return sorted(_final, key=lambda tup: tup[0])
rerank the two vectors by min aggregate rank, reorder
378,459
def allowed_info_messages(*info_messages):
    def wrapper(func):
        setattr(func, ALLOWED_INFO_MESSAGES, info_messages)
        return func
    return wrapper
Decorator ignoring defined info messages at the end of test method. As param use what :py:meth:`~.WebdriverWrapperInfoMixin.get_info_messages` returns. .. versionadded:: 2.0
378,460
def obfuscate(cls, idStr): return unicode(base64.urlsafe_b64encode( idStr.encode()).replace(b, b))
Mildly obfuscates the specified ID string in an easily reversible fashion. This is not intended for security purposes, but rather to dissuade users from depending on our internal ID structures.
378,461
def get_extension_classes():
    res = [SyntaxHighlightingExtension, SearchExtension, TagExtension,
           DevhelpExtension, LicenseExtension, GitUploadExtension,
           EditOnGitHubExtension]
    if sys.version_info[1] >= 5:
        res += [DBusExtension]
    try:
        from hotdoc.extensions.c.c_extension import CExtension
        res += [CExtension]
    except ImportError:
        pass
    try:
        from hotdoc.extensions.gi.gi_extension import GIExtension
        res += [GIExtension]
    except ImportError:
        pass
    return res
Hotdoc's setuptools entry point
378,462
def magic_file(filename):
    head, foot = _file_details(filename)
    if not head:
        raise ValueError("Input was empty")
    try:
        info = _identify_all(head, foot, ext_from_filename(filename))
    except PureError:
        info = []
    info.sort(key=lambda x: x.confidence, reverse=True)
    return info
Returns tuple of (num_of_matches, array_of_matches) arranged highest confidence match first. :param filename: path to file :return: list of possible matches, highest confidence first
378,463
def file_to_md5(filename, block_size=8192):
    md5 = hashlib.md5()
    with open(filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5.hexdigest()
Calculate the md5 hash of a file. Memory-friendly solution, it reads the file piece by piece. See stackoverflow.com/questions/1131220/ :param filename: filename to convert :param block_size: size of block :return: MD5 hash of file content
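A self-contained check of the chunked-hashing idea; the helper is re-declared locally so the snippet runs on its own:

import hashlib, tempfile, os

def file_to_md5(filename, block_size=8192):
    md5 = hashlib.md5()
    with open(filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5.hexdigest()

payload = b"hello world" * 10000
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(payload)
path = tmp.name
try:
    # Reading in 8 KiB blocks gives the same digest as hashing everything at once.
    assert file_to_md5(path) == hashlib.md5(payload).hexdigest()
finally:
    os.remove(path)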
378,464
def _map_trajectory(self): self.trajectory_map = {} with open(self.filepath, ) as trajectory_file: with closing( mmap( trajectory_file.fileno(), 0, access=ACCESS_READ)) as mapped_file: progress = 0 line = 0 frame = -1 frame_start = 0 while progress <= len(mapped_file): line = line + 1 bline = mapped_file.readline() if len(bline) == 0: frame = frame + 1 if progress - frame_start > 10: self.trajectory_map[frame] = [ frame_start, progress ] break sline = bline.decode("utf-8").strip().split() progress = progress + len(bline) self.no_of_frames = frame
Return filepath as a class attribute
378,465
def _GetDenseDimensions(list_of_lists):
    if not isinstance(list_of_lists, (list, tuple)):
        return []
    elif not list_of_lists:
        return [0]
    else:
        return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
Returns the inferred dense dimensions of a list of lists.
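A quick illustration of what the inferred dense dimensions look like; the helper is reproduced so the example runs standalone:

def _GetDenseDimensions(list_of_lists):
    if not isinstance(list_of_lists, (list, tuple)):
        return []
    elif not list_of_lists:
        return [0]
    else:
        return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])

print(_GetDenseDimensions(7))                       # []  (a scalar has no dimensions)
print(_GetDenseDimensions([]))                      # [0]
print(_GetDenseDimensions([[1, 2, 3], [4, 5, 6]]))  # [2, 3]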
378,466
def wrap_class(cls, error_threshold=None): methods = inspect.getmembers(cls, inspect.ismethod) + inspect.getmembers(cls, inspect.isfunction) for method_name, method in methods: wrapped_method = flawless.client.client._wrap_function_with_error_decorator( method if not im_self(method) else im_func(method), save_current_stack_trace=False, error_threshold=error_threshold, ) if im_self(method): wrapped_method = classmethod(wrapped_method) setattr(cls, method_name, wrapped_method) return cls
Wraps a class with reporting to errors backend by decorating each function of the class. Decorators are injected under the classmethod decorator if they exist.
378,467
def run_backdoor(address, namespace=None):
    log.info("starting on %r" % (address,))
    serversock = io.Socket()
    serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serversock.bind(address)
    serversock.listen(socket.SOMAXCONN)
    while 1:
        clientsock, address = serversock.accept()
        log.info("connection received from %r" % (address,))
        scheduler.schedule(backdoor_handler, args=(clientsock, namespace))
start a server that runs python interpreters on connections made to it .. note:: this function blocks effectively indefinitely -- it runs the listening socket loop in the current greenlet. to keep the current greenlet free, :func:`schedule<greenhouse.scheduler.schedule>` this function. :param address: the address on which to listen for backdoor connections, in the form of a two-tuple ``(host, port)`` :type address: tuple :param namespace: the local namespace dict for the interpreter, or None to have each connection create its own empty namespace :type namespace: dict or None
378,468
def get_scanner(self, skey): if skey and self[] == : skey = skey.lower() return self._gsm().get(skey)
Find the appropriate scanner given a key (usually a file suffix).
378,469
def assign_global_ip(self, global_ip_id, target): return self.client[].route( target, id=global_ip_id)
Assigns a global IP address to a specified target. :param int global_ip_id: The ID of the global IP being assigned :param string target: The IP address to assign
378,470
def _merge_array(lhs, rhs, type_): element_type = type_.array_element_type if element_type.code in _UNMERGEABLE_TYPES: lhs.list_value.values.extend(rhs.list_value.values) return lhs lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values) if not len(lhs) or not len(rhs): return Value(list_value=ListValue(values=(lhs + rhs))) first = rhs.pop(0) if first.HasField("null_value"): lhs.append(first) else: last = lhs.pop() try: merged = _merge_by_type(last, first, element_type) except Unmergeable: lhs.append(last) lhs.append(first) else: lhs.append(merged) return Value(list_value=ListValue(values=(lhs + rhs)))
Helper for '_merge_by_type'.
378,471
def _EncodeUnknownFields(message): source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message)) if source is None: return message result = _CopyProtoMessageVanillaProtoJson(message) pairs_field = message.field_by_name(source) if not isinstance(pairs_field, messages.MessageField): raise exceptions.InvalidUserInputError( % pairs_field) pairs_type = pairs_field.message_type value_field = pairs_type.field_by_name() value_variant = value_field.variant pairs = getattr(message, source) codec = _ProtoJsonApiTools.Get() for pair in pairs: encoded_value = codec.encode_field(value_field, pair.value) result.set_unrecognized_field(pair.key, encoded_value, value_variant) setattr(result, source, []) return result
Remap unknown fields in message out of message.source.
378,472
def objects_for_push_notification(notification):
    notification_el = ElementTree.fromstring(notification)
    objects = {'type': notification_el.tag}
    for child_el in notification_el:
        tag = child_el.tag
        res = Resource.value_for_element(child_el)
        objects[tag] = res
    return objects
Decode a push notification with the given body XML. Returns a dictionary containing the constituent objects of the push notification. The kind of push notification is given in the ``"type"`` member of the returned dictionary.
378,473
def encode_bbox_target(boxes, anchors):
    anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
    anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)
    waha = anchors_x2y2 - anchors_x1y1
    xaya = (anchors_x2y2 + anchors_x1y1) * 0.5

    boxes_x1y1x2y2 = tf.reshape(boxes, (-1, 2, 2))
    boxes_x1y1, boxes_x2y2 = tf.split(boxes_x1y1x2y2, 2, axis=1)
    wbhb = boxes_x2y2 - boxes_x1y1
    xbyb = (boxes_x2y2 + boxes_x1y1) * 0.5

    txty = (xbyb - xaya) / waha
    twth = tf.log(wbhb / waha)
    encoded = tf.concat([txty, twth], axis=1)
    return tf.reshape(encoded, tf.shape(boxes))
Args: boxes: (..., 4), float32 anchors: (..., 4), float32 Returns: box_encoded: (..., 4), float32 with the same shape.
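The same (tx, ty, tw, th) parameterization written out in plain NumPy for a single box/anchor pair, as a sketch of what the tensor code computes:

import numpy as np

def encode_one_box(box, anchor):
    # box and anchor are (x1, y1, x2, y2); deltas are expressed relative to the anchor
    bw, bh = box[2] - box[0], box[3] - box[1]
    aw, ah = anchor[2] - anchor[0], anchor[3] - anchor[1]
    bx, by = (box[0] + box[2]) / 2.0, (box[1] + box[3]) / 2.0
    ax, ay = (anchor[0] + anchor[2]) / 2.0, (anchor[1] + anchor[3]) / 2.0
    return np.array([(bx - ax) / aw, (by - ay) / ah, np.log(bw / aw), np.log(bh / ah)])

encode_one_box(np.array([10., 10., 30., 50.]), np.array([8., 12., 28., 52.]))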
378,474
def delete_tracking_beacon(self, tracking_beacons_id, **data): return self.delete("/tracking_beacons/{0}/".format(tracking_beacons_id), data=data)
DELETE /tracking_beacons/:tracking_beacons_id/ Delete the :format:`tracking_beacons` with the specified :tracking_beacons_id.
378,475
def prepare_destruction(self, recursive=True): if recursive: for scoped_variable in self.scoped_variables: scoped_variable.prepare_destruction() for connection in self.transitions[:] + self.data_flows[:]: connection.prepare_destruction() for state in self.states.values(): state.prepare_destruction(recursive) del self.scoped_variables[:] del self.transitions[:] del self.data_flows[:] self.states.clear() self.scoped_variables = None self.transitions = None self.data_flows = None self.states = None super(ContainerStateModel, self).prepare_destruction(recursive)
Prepares the model for destruction Recursively un-registers all observers and removes references to child models. Extends the destroy method of the base class by child elements of a container state.
378,476
def flatten(l):
    return sum(map(flatten, l), []) \
        if isinstance(l, list) or isinstance(l, tuple) else [l]
Flatten a nested list.
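A couple of illustrative calls:

flatten([1, [2, (3, [4])], 5])   # -> [1, 2, 3, 4, 5]
flatten("ab")                    # -> ["ab"]; strings are not descended into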
378,477
def com(self): if self._com is None: self._com = np.mean(self.pmts.pos, axis=0) return self._com
Center of mass, calculated from the mean of the PMT positions
378,478
def _populate_trace(self, graph: TraceGraph, trace_frame_ids: List[int]) -> None: while len(trace_frame_ids) > 0: trace_frame_id = trace_frame_ids.pop() if trace_frame_id in self._visited_trace_frame_ids: continue trace_frame = graph._trace_frames[trace_frame_id] self._add_trace_frame(graph, trace_frame) self._visited_trace_frame_ids.add(trace_frame_id) key = (trace_frame.callee_id.local_id, trace_frame.callee_port) trace_frame_ids.extend( [ trace_frame_id for trace_frame_id in graph._trace_frames_map[key] if trace_frame_id not in self._visited_trace_frame_ids and graph._trace_frames[trace_frame_id].kind == trace_frame.kind ] )
Populates (from the given trace graph) the forward and backward traces reachable from the given traces (including input trace frames). Make sure to respect trace kind in successors
378,479
def poll(self, event, timeout=None): return self.llc.poll(self._tco, event, timeout)
Wait for a socket event. Possible *event* values are the strings "recv", "send" and "acks". When the timeout argument is present and not :const:`None`, it should be a floating point number specifying the timeout for the operation in seconds (or fractions thereof). For "recv" or "send" the :meth:`poll` method returns :const:`True` if the next :meth:`recv` or :meth:`send` operation would be non-blocking. The "acks" event may only be used with a data-link-connection type socket; the call then returns :const:`True` if the counter of received acknowledgements was greater than zero, and decrements the counter by one.
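A hedged usage sketch, where sock is assumed to be an already-connected data-link-connection socket:

# wait up to one second until a send would not block, then transmit
if sock.poll("send", timeout=1.0):
    sock.send(b"hello")
# block until incoming data is available, then read it
if sock.poll("recv"):
    data = sock.recv()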
378,480
def parse_data_directories(self, directories=None,
                           forwarded_exports_only=False,
                           import_dllnames_only=False):
    directory_parsing = (
        ('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
        ('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
        ('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
        ('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
        ('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
        ('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
        ('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', self.parse_directory_load_config),
        ('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
        ('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports))
    if directories is not None:
        if not isinstance(directories, (tuple, list)):
            directories = [directories]
    for entry in directory_parsing:
        try:
            directory_index = DIRECTORY_ENTRY[entry[0]]
            dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index]
        except IndexError:
            break
        if directories is None or directory_index in directories:
            if dir_entry.VirtualAddress:
                if forwarded_exports_only and entry[0] == 'IMAGE_DIRECTORY_ENTRY_EXPORT':
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size,
                                     forwarded_only=True)
                elif import_dllnames_only and entry[0] == 'IMAGE_DIRECTORY_ENTRY_IMPORT':
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size,
                                     dllnames_only=True)
                else:
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
                if value:
                    setattr(self, entry[0][6:], value)
        if (directories is not None) and isinstance(directories, list) and (entry[0] in directories):
            directories.remove(directory_index)
Parse and process the PE file's data directories. If the optional argument 'directories' is given, only the directories at the specified indexes will be parsed. Such functionality allows parsing of areas of interest without the burden of having to parse all others. The directories can then be specified as: For export / import only: directories = [ 0, 1 ] or (more verbosely): directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'], DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ] If 'directories' is a list, the ones that are processed will be removed, leaving only the ones that are not present in the image. If `forwarded_exports_only` is True, the IMAGE_DIRECTORY_ENTRY_EXPORT attribute will only contain exports that are forwarded to another DLL. If `import_dllnames_only` is True, symbols will not be parsed from the import table and the entries in the IMAGE_DIRECTORY_ENTRY_IMPORT attribute will not have a `symbols` attribute.
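One plausible fast-load workflow, assuming the usual pefile package layout (the file name is a placeholder):

import pefile

pe = pefile.PE("sample.exe", fast_load=True)   # defer directory parsing
pe.parse_data_directories(directories=[
    pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_IMPORT"],
    pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_EXPORT"],
])
for entry in getattr(pe, "DIRECTORY_ENTRY_IMPORT", []):
    print(entry.dll)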
378,481
def serializeEc(P, compress=True): return _serialize(P, compress, librelic.ec_size_bin_abi, librelic.ec_write_bin_abi)
Generates a compact binary version of this point.
378,482
def LAST(COND, N1, N2): N2 = 1 if N2 == 0 else N2 assert N2 > 0 assert N1 > N2 return COND.iloc[-N1:-N2].all()
Persistence test: COND has held on every day from N1 days ago through N2 days ago.
Arguments:
    COND {pandas.Series of bool} -- the condition series
    N1 {int} -- start of the window, N1 days back (must be > N2)
    N2 {int} -- end of the window, N2 days back (0 is treated as 1)
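An illustrative call, assuming pandas Series inputs (which match the .iloc/.all() usage in the code):

import pandas as pd

CLOSE = pd.Series([10, 11, 12, 13, 12, 13, 14, 15])
OPEN = pd.Series([10, 10, 11, 12, 13, 12, 13, 14])
# True only if CLOSE > OPEN held on every day from 5 days ago up to 2 days ago
LAST(CLOSE > OPEN, 5, 2)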
378,483
def generation(self): if not self.parent: return 0 elif self.parent.is_dict: return 1 + self.parent.generation else: return self.parent.generation
Returns the number of ancestors that are dictionaries
378,484
def add(self, item): if self._unique: key = self._key(item) if self._key else item if key in self._seen: return self._seen.add(key) self._work.append(item) self._count += 1
Add an item to the work queue. :param item: The work item to add. An item may be of any type; however, if it is not hashable, then the work queue must either be initialized with ``unique`` set to ``False``, or a ``key`` callable must have been provided.
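A self-contained sketch of the same deduplication idea outside the class; all names are illustrative:

seen = set()
work = []

def add_unique(item, key=lambda x: x):
    k = key(item)
    if k in seen:
        return            # drop duplicates by key
    seen.add(k)
    work.append(item)

add_unique({"url": "https://a.example"}, key=lambda d: d["url"])
add_unique({"url": "https://a.example"}, key=lambda d: d["url"])  # ignored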
378,485
def once(ctx, name):
    from kibitzr.app import Application
    app = Application()
    sys.exit(app.run(once=True, log_level=ctx.obj['log_level'], names=name))
Run kibitzr checks once and exit
378,486
def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
    return cls._binary_op(x, y, tf.minimum, tf.float32)
Returns a TensorFluent for the minimum function. Args: x: The first operand. y: The second operand. Returns: A TensorFluent wrapping the minimum function.
378,487
def center_mass_exp(interval, scale=1.0):
    assert isinstance(interval, tuple), 'interval must be a tuple'
    assert len(interval) == 2, 'interval must have exactly two elements'
    (interval_left, interval_right) = interval
    assert interval_left >= 0, 'interval_left must be non-negative'
    assert interval_right > interval_left, \
        'interval_right must be bigger than interval_left'
    assert scale > 0, 'scale must be positive'
    if interval_right < np.inf:
        return ((interval_left + scale) * np.exp(-interval_left / scale) - (
            scale + interval_right) * np.exp(-interval_right / scale)) / (
            np.exp(-interval_left / scale) - np.exp(-interval_right / scale))
    else:
        return interval_left + scale
Calculate the center of mass of negative exponential distribution p(x) = exp(-x / scale) / scale in the interval of (interval_left, interval_right). scale is the same scale parameter as scipy.stats.expon.pdf Parameters ---------- interval: size 2 tuple, float interval must be in the form of (interval_left, interval_right), where interval_left/interval_right is the starting/end point of the interval in which the center of mass is calculated for exponential distribution. Note that interval_left must be non-negative, since exponential is not supported in the negative domain, and interval_right must be bigger than interval_left (thus positive) to form a well-defined interval. scale: float, positive The scale parameter of the exponential distribution. See above. Returns ------- m: float The center of mass in the interval of (interval_left, interval_right) for exponential distribution.
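A quick numerical cross-check of the closed form against direct integration; scipy is assumed to be available only for the check:

import numpy as np
from scipy import integrate

scale, a, b = 2.0, 1.0, 5.0
pdf = lambda x: np.exp(-x / scale) / scale
numerator = integrate.quad(lambda x: x * pdf(x), a, b)[0]
denominator = integrate.quad(pdf, a, b)[0]
numerator / denominator                # center of mass by quadrature
center_mass_exp((a, b), scale=scale)   # should agree with the line above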
378,488
def decode(self, encoding='utf-8', errors='strict'):
    from future.types.newstr import newstr
    if errors == 'surrogateescape':
        from future.utils.surrogateescape import register_surrogateescape
        register_surrogateescape()
    return newstr(super(newbytes, self).decode(encoding, errors))
Returns a newstr (i.e. unicode subclass) Decode B using the codec registered for encoding. Default encoding is 'utf-8'. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' as well as any other name registered with codecs.register_error that is able to handle UnicodeDecodeErrors.
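A brief usage sketch; the future.types.newbytes import path is an assumption:

from future.types.newbytes import newbytes

b = newbytes(b"caf\xc3\xa9")
b.decode()                                                # 'café' -- utf-8 is the default codec
newbytes(b"caf\xe9").decode("utf-8", "surrogateescape")   # lone byte mapped to a surrogate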
378,489
def get_reply(self, param, must=[APIKEY, START_TIME, END_TIME, PAGE_NUM, PAGE_SIZE]): r = self.verify_param(param, must) if not r.is_succ(): return r h = CommonResultHandler(lambda rsp: {VERSION_V1:rsp[SMS_REPLY] if SMS_REPLY in rsp else None, VERSION_V2:rsp}[self.version()]) return self.path().post(param, h, r)
Query replied SMS messages.
    Parameter      Type      Required   Description                                               Example
    apikey         String    yes        unique user key                                           9b11127a9701975c734b8aee81ee3526
    start_time     String    yes        start time of the reply query window                      2013-08-11 00:00:00
    end_time       String    yes        end time of the reply query window                        2013-08-12 00:00:00
    page_num       Integer   yes        page number, defaults to 1                                1
    page_size      Integer   yes        entries per page, at most 100                             20
    mobile         String    no         if set, only replies from this number; otherwise all      15205201314
    return_fields            no         fields to return (not yet available)
    sort_fields              no         sort fields (not yet available); defaults to submit time, descending
    Args:
        param:
    Results:
        Result
378,490
def get_model_agents(self): model_stmts = self.get_statements() agents = [] for stmt in model_stmts: for a in stmt.agent_list(): if a is not None: agents.append(a) return agents
Return a list of all Agents from all Statements. Returns ------- agents : list[indra.statements.Agent] A list of Agents that are in the model.
378,491
def enqueue(self, s): self._parts.append(s) self._len += len(s)
Append `s` to the queue. Equivalent to:: queue += s if `queue` were a regular string.
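A self-contained sketch of the accumulating string queue described above; the dequeue side is invented for illustration:

class StringQueue:
    def __init__(self):
        self._parts = []
        self._len = 0

    def enqueue(self, s):
        self._parts.append(s)
        self._len += len(s)

    def dequeue_all(self):
        out = "".join(self._parts)
        self._parts, self._len = [], 0
        return out

q = StringQueue()
q.enqueue("hello ")
q.enqueue("world")
q.dequeue_all()   # -> 'hello world'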
378,492
def configure_upload(self, ns, definition): upload = self.create_upload_func(ns, definition, ns.collection_path, Operation.Upload) upload.__doc__ = "Upload a {}".format(ns.subject_name)
Register an upload endpoint. The definition's func should be an upload function, which must: - accept kwargs for path data and query string parameters - accept a list of tuples of the form (formname, tempfilepath, filename) - optionally return a resource :param ns: the namespace :param definition: the endpoint definition
378,493
def create_role(self, name): models = self.session.router return models.role.new(name=name, owner=self)
Create a new :class:`Role` owned by this :class:`Subject`
378,494
def get_abs_and_rel_paths(self, root_path, file_name, input_dir):
    relative_dir = root_path.replace(input_dir, '')
    return os.path.join(root_path, file_name), relative_dir + '/' + file_name
Return absolute and relative path for file :type root_path: str|unicode :type file_name: str|unicode :type input_dir: str|unicode :rtype: tuple
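An illustrative call using the '/' join reconstructed above; builder stands for whatever object defines the method:

# builder.get_abs_and_rel_paths("/project/assets/css", "site.css", "/project/assets")
#   -> ("/project/assets/css/site.css", "/css/site.css")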
378,495
def download_post(self, post: Post, target: str) -> bool: dirname = _PostPathFormatter(post).format(self.dirname_pattern, target=target) filename = dirname + + self.format_filename(post, target=target) os.makedirs(os.path.dirname(filename), exist_ok=True) downloaded = True self._committed = self.check_if_committed(filename) if self.download_pictures: if post.typename == : edge_number = 1 for sidecar_node in post.get_sidecar_nodes(): if not sidecar_node.is_video or self.download_video_thumbnails is True: downloaded &= self.download_pic(filename=filename, url=sidecar_node.display_url, mtime=post.date_local, filename_suffix=str(edge_number)) if sidecar_node.is_video and self.download_videos is True: downloaded &= self.download_pic(filename=filename, url=sidecar_node.video_url, mtime=post.date_local, filename_suffix=str(edge_number)) edge_number += 1 elif post.typename == : downloaded = self.download_pic(filename=filename, url=post.url, mtime=post.date_local) elif post.typename == : if self.download_video_thumbnails is True: downloaded = self.download_pic(filename=filename, url=post.url, mtime=post.date_local) else: self.context.error("Warning: {0} has unknown typename: {1}".format(post, post.typename)) metadata_string = _ArbitraryItemFormatter(post).format(self.post_metadata_txt_pattern).strip() if metadata_string: self.save_caption(filename=filename, mtime=post.date_local, caption=metadata_string) if post.is_video and self.download_videos is True: downloaded &= self.download_pic(filename=filename, url=post.video_url, mtime=post.date_local) if self.download_geotags and post.location: self.save_location(filename, post.location, post.date_local) if self.download_comments is True: self.update_comments(filename=filename, post=post) if self.save_metadata is not False: self.save_metadata_json(filename, post) self.context.log() return downloaded
Download everything associated with one instagram post node, i.e. picture, caption and video. :param post: Post to download. :param target: Target name, i.e. profile name, #hashtag, :feed; for filename. :return: True if something was downloaded, False otherwise, i.e. file was already there
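A hedged usage sketch of the public Instaloader API; the shortcode is a placeholder:

import instaloader

L = instaloader.Instaloader(download_videos=True, save_metadata=False)
post = instaloader.Post.from_shortcode(L.context, "SHORTCODE")
L.download_post(post, target=post.owner_username)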
378,496
def get_orm_columns(cls: Type) -> List[Column]: mapper = inspect(cls) colmap = mapper.columns return colmap.values()
Gets :class:`Column` objects from an SQLAlchemy ORM class. Does not provide their attribute names.
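A short usage sketch, assuming SQLAlchemy 1.4+ import paths:

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

[c.name for c in get_orm_columns(User)]   # -> ['id', 'name']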
378,497
def p_member_expr(self, p):
    if len(p) == 2:
        p[0] = p[1]
    elif p[1] == 'new':
        p[0] = ast.NewExpr(p[2], p[3])
    elif p[2] == '.':
        p[0] = ast.DotAccessor(p[1], p[3])
    else:
        p[0] = ast.BracketAccessor(p[1], p[3])
member_expr : primary_expr | function_expr | member_expr LBRACKET expr RBRACKET | member_expr PERIOD identifier | NEW member_expr arguments
378,498
def update(cls, name=None, public_nick_name=None, avatar_uuid=None, address_main=None, address_postal=None, language=None, region=None, country=None, ubo=None, chamber_of_commerce_number=None, legal_form=None, status=None, sub_status=None, session_timeout=None, daily_limit_without_confirmation_login=None, notification_filters=None, custom_headers=None): if custom_headers is None: custom_headers = {} api_client = client.ApiClient(cls._get_api_context()) request_map = { cls.FIELD_NAME: name, cls.FIELD_PUBLIC_NICK_NAME: public_nick_name, cls.FIELD_AVATAR_UUID: avatar_uuid, cls.FIELD_ADDRESS_MAIN: address_main, cls.FIELD_ADDRESS_POSTAL: address_postal, cls.FIELD_LANGUAGE: language, cls.FIELD_REGION: region, cls.FIELD_COUNTRY: country, cls.FIELD_UBO: ubo, cls.FIELD_CHAMBER_OF_COMMERCE_NUMBER: chamber_of_commerce_number, cls.FIELD_LEGAL_FORM: legal_form, cls.FIELD_STATUS: status, cls.FIELD_SUB_STATUS: sub_status, cls.FIELD_SESSION_TIMEOUT: session_timeout, cls.FIELD_DAILY_LIMIT_WITHOUT_CONFIRMATION_LOGIN: daily_limit_without_confirmation_login, cls.FIELD_NOTIFICATION_FILTERS: notification_filters } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id()) response_raw = api_client.put(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
Modify a specific company's data. :type user_company_id: int :param name: The company name. :type name: str :param public_nick_name: The company's nick name. :type public_nick_name: str :param avatar_uuid: The public UUID of the company's avatar. :type avatar_uuid: str :param address_main: The user's main address. :type address_main: object_.Address :param address_postal: The company's postal address. :type address_postal: object_.Address :param language: The person's preferred language. Formatted as an ISO 639-1 language code plus an ISO 3166-1 alpha-2 country code, separated by an underscore. :type language: str :param region: The person's preferred region. Formatted as an ISO 639-1 language code plus an ISO 3166-1 alpha-2 country code, separated by an underscore. :type region: str :param country: The country where the company is registered. :type country: str :param ubo: The names and birth dates of the company's ultimate beneficiary owners. Minimum zero, maximum four. :type ubo: list[object_.Ubo] :param chamber_of_commerce_number: The company's chamber of commerce number. :type chamber_of_commerce_number: str :param legal_form: The company's legal form. :type legal_form: str :param status: The user status. Can be: ACTIVE, SIGNUP, RECOVERY. :type status: str :param sub_status: The user sub-status. Can be: NONE, FACE_RESET, APPROVAL, APPROVAL_DIRECTOR, APPROVAL_PARENT, APPROVAL_SUPPORT, COUNTER_IBAN, IDEAL or SUBMIT. :type sub_status: str :param session_timeout: The setting for the session timeout of the company in seconds. :type session_timeout: int :param daily_limit_without_confirmation_login: The amount the company can pay in the session without asking for credentials. :type daily_limit_without_confirmation_login: object_.Amount :param notification_filters: The types of notifications that will result in a push notification or URL callback for this UserCompany. :type notification_filters: list[object_.NotificationFilter] :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
378,499
def pack(self, value, nocheck=False, major=DEFAULT_KATCP_MAJOR): if value is None: value = self.get_default() if value is None: raise ValueError("Cannot pack a None value.") if not nocheck: self.check(value, major) return self.encode(value, major)
Return the value formatted as a KATCP parameter. Parameters ---------- value : object The value to pack. nocheck : bool, optional Whether to check that the value is valid before packing it. major : int, optional Major version of KATCP to use when interpreting types. Defaults to latest implemented KATCP version. Returns ------- packed_value : str The unescaped KATCP string representing the value.
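A hypothetical usage sketch; Int stands in for any concrete KATCP Type subclass with this interface, and the constructor arguments are illustrative:

t = Int(default=0)
t.pack(5)                      # checks the value, then returns its KATCP string form
t.pack(None)                   # None falls back to the declared default before packing
t.pack(10**12, nocheck=True)   # skips validation and encodes the value directly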