Columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k)
11,900
def cli(env, identifier, uri, ibm_api_key):
    image_mgr = SoftLayer.ImageManager(env.client)
    # The third argument (the resource name used in error messages) was lost in
    # extraction; 'image' is a reconstruction.
    image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
    result = image_mgr.export_image_to_uri(image_id, uri, ibm_api_key)
    if not result:
        raise exceptions.CLIAbort("Failed to export Image")
Export an image to object storage. The URI for an object storage object (.vhd/.iso file) of the format: swift://<objectStorageAccount>@<cluster>/<container>/<objectPath> or cos://<regionName>/<bucketName>/<objectPath> if using IBM Cloud Object Storage
11,901
def process(self, job_id): self._logger.info( .format(self._env.now, job_id) ) self._observer.notify_service(time=self._env.now, job_id=job_id) try: service_time = next(self._service_time_generator) except StopIteration: error_msg = () self._logger.error(error_msg) raise GGCQServiceTimeStopIteration(error_msg) try: self._logger.debug(.format(service_time)) except: pass try: yield self._env.timeout(service_time) except TypeError: error_msg = ( "service time has wrong type ".format( service_time, type(service_time).__name__ ) ) self._logger.error(error_msg) raise GGCQServiceTimeTypeError(error_msg) except ValueError as exc: if str(exc).startswith(): error_msg = ( "negative service time {:.2f}".format( service_time ) ) self._logger.error(error_msg) raise GGCQNegativeServiceTimeError(error_msg) else: raise self._logger.info( .format(self._env.now, job_id) ) self._observer.notify_departure(time=self._env.now, job_id=job_id)
Process a job by the queue
11,902
def localize_fieldnames(fields, internationalized_fields):
    result = []
    lang = get_language()
    for field in fields:
        if field in internationalized_fields:
            result.append(get_real_fieldname(field, lang))
        else:
            result.append(field)
    return result
Given a list of fields and a list of field names that are internationalized, will return a list with all internationalized fields properly localized. >>> from django.utils.translation import activate >>> activate('en-us') >>> localize_fieldnames(['name', 'title', 'url'], ['title']) ['name', 'title_en_us', 'url'] :param fields: A :class:`list` af field names. :param internationalized_fields: A list of fields names, these fields are internationalized. :rtype: A list with the actual field names that are used in the current language.
11,903
def derivative(self, x): return self.scalar * self.operator.derivative(self.scalar * x)
Return the derivative at ``x``. The derivative of the right scalar operator multiplication follows the chain rule: ``OperatorRightScalarMult(op, s).derivative(y) == OperatorLeftScalarMult(op.derivative(s * y), s)`` Parameters ---------- x : `domain` `element-like` Evaluation point of the derivative. Examples -------- >>> space = odl.rn(3) >>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1]) >>> left_mul_op = OperatorRightScalarMult(operator, 3) >>> derivative = left_mul_op.derivative([0, 0, 0]) >>> derivative([1, 1, 1]) rn(3).element([ 3., 3., 3.])
11,904
def as_xml(self):
    self.default_capability()
    s = self.new_sitemap()
    return s.resources_as_xml(self, sitemapindex=self.sitemapindex)
Return XML serialization of this list. This code does not support the case where the list is too big for a single XML document.
11,905
def read_namespaced_pod_status(self, name, namespace, **kwargs):
    # The stripped dictionary keys follow the standard pattern of the
    # OpenAPI-generated Kubernetes client and are reconstructions.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs)
    else:
        (data) = self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs)
        return data
read_namespaced_pod_status # noqa: E501 read status of the specified Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_pod_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Pod (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Pod If the method is called asynchronously, returns the request thread.
11,906
def clear_display_name(self): if (self.get_display_name_metadata().is_read_only() or self.get_display_name_metadata().is_required()): raise errors.NoAccess() self._my_map[] = self._display_name_metadata[][0]
Clears the display name. raise: NoAccess - ``display_name`` cannot be modified *compliance: mandatory -- This method must be implemented.*
11,907
def save_data(self):
    title = _("Save profiler result")
    filename, _selfilter = getsavefilename(
        self, title, getcwd_or_home(),
        _("Profiler result") + " (*.Result)")
    if filename:
        self.datatree.save_data(filename)
Save data
11,908
def find_device(self, service_uuids=[], name=None, timeout_sec=TIMEOUT_SEC):
    start = time.time()
    while True:
        found = self.find_devices(service_uuids, name)
        if len(found) > 0:
            return found[0]
        if time.time() - start >= timeout_sec:
            return None
        time.sleep(1)
Return the first device that advertises the specified service UUIDs or has the specified name. Will wait up to timeout_sec seconds for the device to be found, and if the timeout is zero then it will not wait at all and immediately return a result. When no device is found a value of None is returned.
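A hedged usage sketch for the scanning helper above. The `provider` object and the UART service UUID are illustrative assumptions, not taken from this entry; the loop contract itself (first match wins, None on timeout) follows the docstring.

import uuid

# Hypothetical service UUID and provider object, for illustration only.
UART_SERVICE_UUID = uuid.UUID('6e400001-b5a3-f393-e0a9-e50e24dcca9e')

def connect_first_uart(provider, timeout_sec=30):
    # find_device() returns the first advertising match, or None on timeout.
    device = provider.find_device(service_uuids=[UART_SERVICE_UUID],
                                  timeout_sec=timeout_sec)
    if device is None:
        raise RuntimeError('no UART device found within %d seconds' % timeout_sec)
    device.connect()
    return device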
11,909
def MI_referenceNames(self, env, objectName, resultClassName, role): logger = env.get_logger() logger.log_debug( \ % (resultClassName)) if not resultClassName: raise pywbem.CIMError( pywbem.CIM_ERR_FAILED, "Empty resultClassName passed to ReferenceNames") model = pywbem.CIMInstance(classname=resultClassName) model.path = pywbem.CIMInstanceName(classname=resultClassName, namespace=objectName.namespace) if role: if role in model.properties: model[role] = objectName gen = self.references(env=env, object_name=objectName, model=model, result_class_name=, role=role, result_role=None, keys_only=True) if gen is None: logger.log_debug( \ ) return for inst in gen: for prop in inst.properties.values(): if hasattr(prop.value, ) and \ prop.value.namespace is None: prop.value.namespace = objectName.namespace yield inst.path logger.log_debug()
Return instance names of an association class. Implements the WBEM operation ReferenceNames in terms of the references method. A derived class will not normally override this method.
11,910
def _build_latex_array(self, aliases=None): columns = 1 if aliases: qregdata = {} for q in aliases.values(): if q[0] not in qregdata: qregdata[q[0]] = q[1] + 1 elif qregdata[q[0]] < q[1] + 1: qregdata[q[0]] = q[1] + 1 else: qregdata = self.qregs for column, layer in enumerate(self.ops, 1): for op in layer: if op.condition: mask = self._get_mask(op.condition[0]) cl_reg = self.clbit_list[self._ffs(mask)] if_reg = cl_reg[0] pos_2 = self.img_regs[cl_reg] if_value = format(op.condition[1], ).zfill(self.cregs[if_reg])[::-1] if op.name not in [, , , , , ]: nm = op.name qarglist = op.qargs if aliases is not None: qarglist = map(lambda x: aliases[x], qarglist) if len(qarglist) == 1: pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])] if op.condition: mask = self._get_mask(op.condition[0]) cl_reg = self.clbit_list[self._ffs(mask)] if_reg = cl_reg[0] pos_2 = self.img_regs[cl_reg] if nm == "x": self._latex[pos_1][column] = "\\gate{X}" elif nm == "y": self._latex[pos_1][column] = "\\gate{Y}" elif nm == "z": self._latex[pos_1][column] = "\\gate{Z}" elif nm == "h": self._latex[pos_1][column] = "\\gate{H}" elif nm == "s": self._latex[pos_1][column] = "\\gate{S}" elif nm == "sdg": self._latex[pos_1][column] = "\\gate{S^\\dag}" elif nm == "t": self._latex[pos_1][column] = "\\gate{T}" elif nm == "tdg": self._latex[pos_1][column] = "\\gate{T^\\dag}" elif nm == "u0": self._latex[pos_1][column] = "\\gate{U_0(%s)}" % ( op.op.params[0]) elif nm == "u1": self._latex[pos_1][column] = "\\gate{U_1(%s)}" % ( op.op.params[0]) elif nm == "u2": self._latex[pos_1][column] = \ "\\gate{U_2\\left(%s,%s\\right)}" % ( op.op.params[0], op.op.params[1]) elif nm == "u3": self._latex[pos_1][column] = ("\\gate{U_3(%s,%s,%s)}" % ( op.op.params[0], op.op.params[1], op.op.params[2])) elif nm == "rx": self._latex[pos_1][column] = "\\gate{R_x(%s)}" % ( op.op.params[0]) elif nm == "ry": self._latex[pos_1][column] = "\\gate{R_y(%s)}" % ( op.op.params[0]) elif nm == "rz": self._latex[pos_1][column] = "\\gate{R_z(%s)}" % ( op.op.params[0]) else: self._latex[pos_1][columns] = "\\gate{%s}" % nm gap = pos_2 - pos_1 for i in range(self.cregs[if_reg]): if if_value[i] == : self._latex[pos_2 + i][column] = \ "\\control \\cw \\cwx[-" + str(gap) + "]" gap = 1 else: self._latex[pos_2 + i][column] = \ "\\controlo \\cw \\cwx[-" + str(gap) + "]" gap = 1 else: if nm == "x": self._latex[pos_1][column] = "\\gate{X}" elif nm == "y": self._latex[pos_1][column] = "\\gate{Y}" elif nm == "z": self._latex[pos_1][column] = "\\gate{Z}" elif nm == "h": self._latex[pos_1][column] = "\\gate{H}" elif nm == "s": self._latex[pos_1][column] = "\\gate{S}" elif nm == "sdg": self._latex[pos_1][column] = "\\gate{S^\\dag}" elif nm == "t": self._latex[pos_1][column] = "\\gate{T}" elif nm == "tdg": self._latex[pos_1][column] = "\\gate{T^\\dag}" elif nm == "u0": self._latex[pos_1][column] = "\\gate{U_0(%s)}" % ( op.op.params[0]) elif nm == "u1": self._latex[pos_1][column] = "\\gate{U_1(%s)}" % ( op.op.params[0]) elif nm == "u2": self._latex[pos_1][column] = \ "\\gate{U_2\\left(%s,%s\\right)}" % ( op.op.params[0], op.op.params[1]) elif nm == "u3": self._latex[pos_1][column] = ("\\gate{U_3(%s,%s,%s)}" % ( op.op.params[0], op.op.params[1], op.op.params[2])) elif nm == "rx": self._latex[pos_1][column] = "\\gate{R_x(%s)}" % ( op.op.params[0]) elif nm == "ry": self._latex[pos_1][column] = "\\gate{R_y(%s)}" % ( op.op.params[0]) elif nm == "rz": self._latex[pos_1][column] = "\\gate{R_z(%s)}" % ( op.op.params[0]) elif nm == "reset": self._latex[pos_1][column] = ( 
"\\push{\\rule{.6em}{0em}\\ket{0}\\" "rule{.2em}{0em}} \\qw") else: self._latex[pos_1][columns] = "\\gate{%s}" % nm elif len(qarglist) == 2: pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])] pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])] if op.condition: pos_3 = self.img_regs[(if_reg, 0)] temp = [pos_1, pos_2, pos_3] temp.sort(key=int) bottom = temp[1] gap = pos_3 - bottom for i in range(self.cregs[if_reg]): if if_value[i] == : self._latex[pos_3 + i][column] = \ "\\control \\cw \\cwx[-" + str(gap) + "]" gap = 1 else: self._latex[pos_3 + i][column] = \ "\\controlo \\cw \\cwx[-" + str(gap) + "]" gap = 1 if nm == "cx": self._latex[pos_1][column] = \ "\\ctrl{" + str(pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\targ" elif nm == "cz": self._latex[pos_1][column] = \ "\\ctrl{" + str(pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\control\\qw" elif nm == "cy": self._latex[pos_1][column] = \ "\\ctrl{" + str(pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\gate{Y}" elif nm == "ch": self._latex[pos_1][column] = \ "\\ctrl{" + str(pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\gate{H}" elif nm == "swap": self._latex[pos_1][column] = "\\qswap" self._latex[pos_2][column] = \ "\\qswap \\qwx[" + str(pos_1 - pos_2) + "]" elif nm == "crz": self._latex[pos_1][column] = \ "\\ctrl{" + str(pos_2 - pos_1) + "}" self._latex[pos_2][column] = \ "\\gate{R_z(%s)}" % (op.op.params[0]) elif nm == "cu1": self._latex[pos_1][column - 1] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column - 1] = "\\control\\qw" self._latex[min(pos_1, pos_2)][column] = \ "\\dstick{%s}\\qw" % (op.op.params[0]) self._latex[max(pos_1, pos_2)][column] = "\\qw" elif nm == "cu3": self._latex[pos_1][column] = \ "\\ctrl{" + str(pos_2 - pos_1) + "}" self._latex[pos_2][column] = \ "\\gate{U_3(%s,%s,%s)}" % (op.op.params[0], op.op.params[1], op.op.params[2]) else: temp = [pos_1, pos_2] temp.sort(key=int) if nm == "cx": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\targ" elif nm == "cz": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\control\\qw" elif nm == "cy": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\gate{Y}" elif nm == "ch": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\gate{H}" elif nm == "swap": self._latex[pos_1][column] = "\\qswap" self._latex[pos_2][column] = \ "\\qswap \\qwx[" + str(pos_1 - pos_2) + "]" elif nm == "crz": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = \ "\\gate{R_z(%s)}" % (op.op.params[0]) elif nm == "cu1": self._latex[pos_1][column - 1] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column - 1] = "\\control\\qw" self._latex[min(pos_1, pos_2)][column] = \ "\\dstick{%s}\\qw" % (op.op.params[0]) self._latex[max(pos_1, pos_2)][column] = "\\qw" elif nm == "cu3": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = ("\\gate{U_3(%s,%s,%s)}" % ( op.op.params[0], op.op.params[1], op.op.params[2])) else: start_pos = min([pos_1, pos_2]) stop_pos = max([pos_1, pos_2]) if stop_pos - start_pos >= 2: delta = stop_pos - start_pos self._latex[start_pos][columns] = ( "\\multigate{%s}{%s}" % (delta, nm)) for i_pos in range(start_pos + 1, stop_pos + 1): self._latex[i_pos][columns] = "\\ghost{%s}" % nm else: self._latex[start_pos][columns] = ( "\\multigate{1}{%s}" % nm) 
self._latex[stop_pos][columns] = "\\ghost{%s}" % nm elif len(qarglist) == 3: pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])] pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])] pos_3 = self.img_regs[(qarglist[2][0], qarglist[2][1])] if op.condition: pos_4 = self.img_regs[(if_reg, 0)] temp = [pos_1, pos_2, pos_3, pos_4] temp.sort(key=int) bottom = temp[2] prev_column = [x[column - 1] for x in self._latex] for item, prev_entry in enumerate(prev_column): if in prev_entry: span = re.search(, prev_entry) if span and any(i in temp for i in range( item, int(span.group(1)))): self._latex[item][column - 1] = \ prev_entry.replace( , ) gap = pos_4 - bottom for i in range(self.cregs[if_reg]): if if_value[i] == : self._latex[pos_4 + i][column] = \ "\\control \\cw \\cwx[-" + str(gap) + "]" gap = 1 else: self._latex[pos_4 + i][column] = \ "\\controlo \\cw \\cwx[-" + str(gap) + "]" gap = 1 if nm == "ccx": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\ctrl{" + str( pos_3 - pos_2) + "}" self._latex[pos_3][column] = "\\targ" if nm == "cswap": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\qswap" self._latex[pos_3][column] = \ "\\qswap \\qwx[" + str(pos_2 - pos_3) + "]" else: temp = [pos_1, pos_2, pos_3] temp.sort(key=int) prev_column = [x[column - 1] for x in self._latex] for item, prev_entry in enumerate(prev_column): if in prev_entry: span = re.search(, prev_entry) if span and any(i in temp for i in range( item, int(span.group(1)))): self._latex[item][column - 1] = \ prev_entry.replace( , ) if nm == "ccx": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\ctrl{" + str( pos_3 - pos_2) + "}" self._latex[pos_3][column] = "\\targ" elif nm == "cswap": self._latex[pos_1][column] = "\\ctrl{" + str( pos_2 - pos_1) + "}" self._latex[pos_2][column] = "\\qswap" self._latex[pos_3][column] = \ "\\qswap \\qwx[" + str(pos_2 - pos_3) + "]" else: start_pos = min([pos_1, pos_2, pos_3]) stop_pos = max([pos_1, pos_2, pos_3]) if stop_pos - start_pos >= 3: delta = stop_pos - start_pos self._latex[start_pos][columns] = ( "\\multigate{%s}{%s}" % (delta, nm)) for i_pos in range(start_pos + 1, stop_pos + 1): self._latex[i_pos][columns] = "\\ghost{%s}" % nm else: self._latex[pos_1][columns] = ( "\\multigate{2}{%s}" % nm) self._latex[pos_2][columns] = "\\ghost{%s}" % nm self._latex[pos_3][columns] = "\\ghost{%s}" % nm elif len(qarglist) > 3: nbits = len(qarglist) pos_array = [self.img_regs[(qarglist[0][0], qarglist[0][1])]] for i in range(1, nbits): pos_array.append(self.img_regs[(qarglist[i][0], qarglist[i][1])]) pos_start = min(pos_array) pos_stop = max(pos_array) delta = pos_stop - pos_start self._latex[pos_start][columns] = ( "\\multigate{%s}{%s}" % (nbits - 1, nm)) for pos in range(pos_start + 1, pos_stop + 1): self._latex[pos][columns] = "\\ghost{%s}" % nm elif op.name == "measure": if (len(op.cargs) != 1 or len(op.qargs) != 1 or op.op.params): raise exceptions.VisualizationError("bad operation record") if op.condition: raise exceptions.VisualizationError( "If controlled measures currently not supported.") qname, qindex = op.qargs[0] cname, cindex = op.cargs[0] if aliases: newq = aliases[(qname, qindex)] qname = newq[0] qindex = newq[1] pos_1 = self.img_regs[(qname, qindex)] pos_2 = self.img_regs[(cname, cindex)] try: self._latex[pos_1][column] = "\\meter" prev_column = [x[column - 1] for x in self._latex] for item, prev_entry in enumerate(prev_column): if in 
prev_entry: span = re.search(, prev_entry) if span and ( item + int(span.group(1))) - pos_1 >= 0: self._latex[item][column - 1] = \ prev_entry.replace( , ) self._latex[pos_2][column] = \ "\\cw \\cwx[-" + str(pos_2 - pos_1) + "]" except Exception as e: raise exceptions.VisualizationError( % str(e)) elif op.name in [, , , , ]: if self.plot_barriers: qarglist = op.qargs indexes = [self._get_qubit_index(x) for x in qarglist] start_bit = self.qubit_list[min(indexes)] if aliases is not None: qarglist = map(lambda x: aliases[x], qarglist) start = self.img_regs[start_bit] span = len(op.qargs) - 1 self._latex[start][column] = "\\qw \\barrier{" + str( span) + "}" else: raise exceptions.VisualizationError("bad node data")
Returns an array of strings containing \\LaTeX for this circuit. If aliases is not None, aliases contains a dict mapping the current qubits in the circuit to new qubit names. We will deduce the register names and sizes from aliases.
11,911
def get_value(self, index):
    if index.column() == 0:
        return self.keys[index.row()]
    elif index.column() == 1:
        return self.types[index.row()]
    elif index.column() == 2:
        return self.sizes[index.row()]
    else:
        return self._data[self.keys[index.row()]]
Return current value
11,912
def dot(x_gpu, y_gpu, transa=, transb=, handle=None, target=None): if handle is None: handle = _global_cublas_handle if len(x_gpu.shape) == 1 and len(y_gpu.shape) == 1: if x_gpu.size != y_gpu.size: raise ValueError( % (x_gpu.size, y_gpu.size)) if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64): cublas_func = cublas.cublasCdotu elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32): cublas_func = cublas.cublasSdot elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128): cublas_func = cublas.cublasZdotu elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64): cublas_func = cublas.cublasDdot else: raise ValueError( % (str(x_gpu.dtype), str(y_gpu.dtype))) return cublas_func(handle, x_gpu.size, x_gpu.gpudata, 1, y_gpu.gpudata, 1) else: x_shape = x_gpu.shape y_shape = y_gpu.shape if len(x_shape) == 1: x_shape = (1, x_shape[0]) if len(y_shape) == 1: y_shape = (1, y_shape[0]) if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64): cublas_func = cublas.cublasCgemm alpha = np.complex64(1.0) beta = np.complex64(0.0) elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32): cublas_func = cublas.cublasSgemm alpha = np.float32(1.0) beta = np.float32(0.0) elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128): cublas_func = cublas.cublasZgemm alpha = np.complex128(1.0) beta = np.complex128(0.0) elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64): cublas_func = cublas.cublasDgemm alpha = np.float64(1.0) beta = np.float64(0.0) else: raise ValueError( % (str(x_gpu.dtype), str(y_gpu.dtype))) transa = lower(transa) transb = lower(transb) if transb in [, ]: m, k = y_shape elif transb in []: k, m = y_shape else: raise ValueError( % transb) if transa in [, ]: l, n = x_shape elif transa in []: n, l = x_shape else: raise ValueError( % transa) if l != k: raise ValueError( % (x_shape, y_shape)) if transb == : lda = max(1, m) else: lda = max(1, k) if transa == : ldb = max(1, k) else: ldb = max(1, n) ldc = max(1, m) if target is None: target = gpuarray.empty((n, ldc), x_gpu.dtype, allocator=memory_pool.allocate) cublas_func(handle, transb, transa, m, n, k, alpha, y_gpu.gpudata, lda, x_gpu.gpudata, ldb, beta, target.gpudata, ldc) return target
Dot product of two arrays. For 1D arrays, this function computes the inner product. For 2D arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix product; the result has shape `(m, n)`. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray Input array. y_gpu : pycuda.gpuarray.GPUArray Input array. transa : char If 'T', compute the product of the transpose of `x_gpu`. If 'C', compute the product of the Hermitian of `x_gpu`. transb : char If 'T', compute the product of the transpose of `y_gpu`. If 'C', compute the product of the Hermitian of `y_gpu`. handle : int CUBLAS context. If no context is specified, the default handle from `scikits.cuda.misc._global_cublas_handle` is used. Returns ------- c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128} Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D arrays, the result will be returned as a scalar. Notes ----- The input matrices must all contain elements of the same data type. Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import linalg >>> import misc >>> linalg.init() >>> a = np.asarray(np.random.rand(4, 2), np.float32) >>> b = np.asarray(np.random.rand(2, 2), np.float32) >>> a_gpu = gpuarray.to_gpu(a) >>> b_gpu = gpuarray.to_gpu(b) >>> c_gpu = linalg.dot(a_gpu, b_gpu) >>> np.allclose(np.dot(a, b), c_gpu.get()) True >>> d = np.asarray(np.random.rand(5), np.float32) >>> e = np.asarray(np.random.rand(5), np.float32) >>> d_gpu = gpuarray.to_gpu(d) >>> e_gpu = gpuarray.to_gpu(e) >>> f = linalg.dot(d_gpu, e_gpu) >>> np.allclose(np.dot(d, e), f) True
11,913
def get_type(full_path): status = {: []} if os.path.ismount(full_path): status[] += [] elif os.path.islink(full_path): status[] += [] if os.path.isfile(full_path): status[] += [] elif os.path.isdir(full_path): status[] += [] if not status[]: if os.stat.S_ISSOCK(status[]): status[] += [] elif os.stat.S_ISCHR(status[]): status[] += [] elif os.stat.S_ISBLK(status[]): status[] += [] elif os.stat.S_ISFIFO(status[]): status[] += [] if not status[]: status[] += [] elif status[] and status[][-1] == : status[] += [] return status[]
Get the type (socket, file, dir, symlink, ...) for the provided path
11,914
def start_server(app: web.Application = None, port: int = None,
                 address: str = None, **kwargs: Any) -> HTTPServer:
    app = app or get_app()
    port = port if port is not None else config.port
    address = address if address is not None else config.address
    server = app.listen(port, address=address)
    app.server = server
    app.loop = asyncio.get_event_loop()
    # The server_config keys were lost in extraction; 'address' and 'port'
    # are reconstructions.
    server_config['address'] = address
    for sock in server._sockets.values():
        if sock.family == socket.AF_INET:
            server_config['port'] = sock.getsockname()[1]
            break
    return server
Start server with ``app`` on ``localhost:port``. If port is not specified, use command line option of ``--port``.
11,915
def plotGrid(self, numLines=(5,5), lineWidth=1, colour=" x1, x2, y1, y2 = mp.axis() ra1, dec0 = self.pixToSky(x1, y1) ra0, dec1 = self.pixToSky(x2, y2) xNum, yNum = numLines self.raRange, self.decRange = self.getRaDecRanges(numLines) a1 = np.abs(ra1-ra0) a2 = np.abs( min(ra0, ra1) - (max(ra0, ra1) - 360)) if a2 < a1: if ra0 < ra1: ra1 -= 360 else: ra0 -= 360 lwr = min(ra0, ra1) upr = max(ra0, ra1) stepX = round((upr-lwr) / float(xNum)) ra_deg = np.arange(lwr - 3*stepX, upr + 3.5*stepX, 1, dtype=np.float) for dec in self.decRange: self.plotLine(ra_deg, dec, , color = colour, linewidth = lineWidth) lwr = min(dec0, dec1) upr = max(dec0, dec1) stepY = round((upr-lwr) / float(yNum)) dec_deg = np.arange(dec0 - 3*stepY, dec1 + 3.5*stepY, 1, dtype=np.float) for ra in self.raRange: self.plotLine(ra, dec_deg, , color = colour, linewidth = lineWidth) mp.axis([x1, x2, y1, y2])
Plot NUMLINES[0] vertical gridlines and NUMLINES[1] horizontal gridlines, while keeping the initial axes bounds that were present upon its calling. Will not work for certain cases.
11,916
def prepend_string_list(self, key, value, max_length_key):
    max_len = self.get(max_length_key)
    strings = self.get_string_list(key)
    strings = [value] + [x for x in strings if x != value]
    strings = strings[:max_len]
    self.beginWriteArray(key)
    for i in range(len(strings)):
        self.setArrayIndex(i)
        self.setValue("entry", strings[i])
    self.endArray()
Prepend a fixed-length string list with a new string. The oldest string will be removed from the list. If the string is already in the list, it is shuffled to the top. Use this to implement things like a 'most recent files' entry.
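A minimal sketch of the "most recent files" pattern the docstring mentions, assuming a settings object that exposes prepend_string_list and get_string_list as above; the key names are illustrative, not part of the original API.

# Hypothetical helpers built on prepend_string_list(); key names are made up.
def remember_recent_file(settings, path):
    # Moves `path` to the top, dedupes, and truncates to the configured max.
    settings.prepend_string_list('recent_files', path, 'recent_files_max_length')

def recent_files(settings):
    # Most recently used entry first.
    return settings.get_string_list('recent_files')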
11,917
def token(cls: Type[ConditionType], left: Any, op: Optional[Any] = None,
          right: Optional[Any] = None) -> ConditionType:
    condition = cls()
    condition.left = left
    if op:
        condition.op = op
    if right:
        condition.right = right
    return condition
Return Condition instance from arguments and Operator :param left: Left argument :param op: Operator :param right: Right argument :return:
11,918
def missing_any(da, freq, **kwds):
    # The stripped string literals ('time', '-', 'S') have been reconstructed
    # from the resampling logic; they were lost in extraction.
    c = da.notnull().resample(time=freq).sum(dim='time')
    if '-' in freq:
        pfreq, anchor = freq.split('-')
    else:
        pfreq = freq
    if pfreq.endswith('S'):
        start_time = c.indexes['time']
        end_time = start_time.shift(1, freq=freq)
    else:
        end_time = c.indexes['time']
        start_time = end_time.shift(-1, freq=freq)
    n = (end_time - start_time).days
    nda = xr.DataArray(n.values, coords={'time': c.time}, dims='time')
    return c != nda
r"""Return a boolean DataArray indicating whether there are missing days in the resampled array. Parameters ---------- da : DataArray Input array at daily frequency. freq : str Resampling frequency. Returns ------- out : DataArray A boolean array set to True if any month or year has missing values.
11,919
def get_snapshot_policies(self, view=None):
    return self._get("snapshots/policies", ApiSnapshotPolicy, True,
                     params=view and dict(view=view) or None,
                     api_version=6)
Retrieve a list of snapshot policies. @param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'. @return: A list of snapshot policies. @since: API v6
11,920
def get_placeholder_image(width, height, name=None, fg_color=get_color(), bg_color=get_color(), text=None, font=u, fontsize=42, encoding=u, mode=, fmt=u): size = (width, height) text = text if text else .format(width, height) try: font = ImageFont.truetype(font, size=fontsize, encoding=encoding) except IOError: font = ImageFont.load_default() result_img = Image.new(mode, size, bg_color) text_size = font.getsize(text) text_img = Image.new("RGBA", size, bg_color) left = size[0] / 2 - text_size[0] / 2 top = size[1] / 2 - text_size[1] / 2 drawing = ImageDraw.Draw(text_img) drawing.text((left, top), text, font=font, fill=fg_color) txt_img = ImageOps.fit(text_img, size, method=Image.BICUBIC, centering=(0.5, 0.5)) result_img.paste(txt_img) file_obj = io.BytesIO() txt_img.save(file_obj, fmt) return file_obj.getvalue()
Little spin-off from https://github.com/Visgean/python-placeholder that not saves an image and instead returns it.
11,921
def get_pressure(self):
    self._init_pressure()
    pressure = 0
    data = self._pressure.pressureRead()
    if data[0]:
        pressure = data[1]
    return pressure
Returns the pressure in Millibars
11,922
def _construct_role(self, managed_policy_map): execution_role = IAMRole(self.logical_id + , attributes=self.get_passthrough_resource_attributes()) execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy() managed_policy_arns = [ArnGenerator.generate_aws_managed_policy_arn()] if self.Tracing: managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn()) function_policies = FunctionPolicies({"Policies": self.Policies}, policy_template_processor=None) policy_documents = [] if self.DeadLetterQueue: policy_documents.append(IAMRolePolicies.dead_letter_queue_policy( self.dead_letter_queue_policy_actions[self.DeadLetterQueue[]], self.DeadLetterQueue[])) for index, policy_entry in enumerate(function_policies.get()): if policy_entry.type is PolicyTypes.POLICY_STATEMENT: policy_documents.append({ : execution_role.logical_id + + str(index), : policy_entry.data }) elif policy_entry.type is PolicyTypes.MANAGED_POLICY: policy_arn = policy_entry.data if isinstance(policy_entry.data, string_types) and policy_entry.data in managed_policy_map: policy_arn = managed_policy_map[policy_entry.data] if policy_arn not in managed_policy_arns: managed_policy_arns.append(policy_arn) else: raise InvalidResourceException( self.logical_id, "Policy at index {} in the property is not valid".format(index)) execution_role.ManagedPolicyArns = list(managed_policy_arns) execution_role.Policies = policy_documents or None execution_role.PermissionsBoundary = self.PermissionsBoundary return execution_role
Constructs a Lambda execution role based on this SAM function's Policies property. :returns: the generated IAM Role :rtype: model.iam.IAMRole
11,923
def CopyToProto(self, proto):
    if (self.file is not None and
            self._serialized_start is not None and
            self._serialized_end is not None):
        proto.ParseFromString(self.file.serialized_pb[
            self._serialized_start:self._serialized_end])
    else:
        # Error message reconstructed; the original literal was lost in extraction.
        raise Error('Descriptor does not contain serialization.')
Copies this to the matching proto in descriptor_pb2. Args: proto: An empty proto instance from descriptor_pb2. Raises: Error: If self couldn't be serialized, due to too few constructor arguments.
11,924
def get_region_nt_counts(region, bam, stranded=False): if type(bam) == str: bam = pysam.AlignmentFile(bam, ) if type(region) is str: r = parse_region(region) if len(r) == 3: chrom, start, end = r elif len(r) == 4: chrom, start, end, strand = r start = int(start) end = int(end) ind = [.format(chrom, x) for x in range(start, end + 1)] pp = bam.pileup(region=region, truncate=True) elif type(region) is (list or tuple): chrom, start, end = region ind = [.format(chrom, x) for x in range(int(start) + 1, int(end) + 1)] pp = bam.pileup(chrom, start, end, truncate=True) cols = [, , , , ] if stranded: cols = [.format(x) for x in cols] + [.format(x) for x in cols] counts = pd.DataFrame(0, index=ind, columns=cols) for pc in pp: return counts
Get counts of each nucleotide from a bam file for a given region. If R1 and R2 reads both overlap a position, only one count will be added. If the R1 and R2 reads disagree at a position they both overlap, that read pair is not used for that position. Can optionally output strand-specific counts. Parameters ---------- region : str or list Region of type chrom:start-end, chrom:start-end:strand, or [chrom, start, end]. The strand is ignored for chrom:start-end:strand. For chrom:start-end, the coordinates are one-based inclusive. For example, the query chr1:10-11 will give you the counts for the 10th and 11th bases of chr1. For [chrom, start, end], the coordinates are zero-based and end exclusive (like a bed file). The query [chr1, 9, 11] will give you the coverage of the 10th and 11th bases of chr1. The region value is passed directly to pysam's pileup function. bam : pysam.calignmentfile.AlignmentFile or str Bam file opened with pysam or path to bam file (must be sorted and indexed). stranded : boolean Boolean indicating whether read data is stranded and stranded nucleotide counts should be returned. Assumes R1 read on reverse strand implies + strand coverage etc. Returns ------- counts : pandas.DataFrame Data frame with the counts for each base in the region. The index of this data frame is one-based for compatibility with VCF files.
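Hedged example calls showing the two coordinate conventions described above; 'sample.bam' is a placeholder for a sorted, indexed BAM file.

# One-based inclusive string region: counts for bases 10 and 11 of chr1.
counts = get_region_nt_counts('chr1:10-11', 'sample.bam')

# Zero-based, end-exclusive list region covering the same two bases,
# with strand-specific columns.
stranded_counts = get_region_nt_counts(['chr1', 9, 11], 'sample.bam', stranded=True)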
11,925
def is_rootlevel(self):
    if self.is_root():
        return False
    parent_name = None
    # The dictionary keys below were lost in extraction; 'parent_id_name',
    # 'name' and 'parent_id' are reconstructions based on the docstring.
    parent_dict = self._json_data.get('parent_id_name')
    if parent_dict and 'name' in parent_dict:
        parent_name = parent_dict.get('name')
    if not parent_dict:
        parent_name = self._client.activity(id=self._json_data.get('parent_id')).name
    if parent_name in ActivityRootNames.values():
        return True
    return False
Determine if the Activity is at the root level of a project. It will look for the name of the parent which should be either ActivityRootNames.WORKFLOW_ROOT or ActivityRootNames.CATALOG_ROOT. If the name of the parent cannot be found an additional API call is made to retrieve the parent object (based on the `parent_id` in the json_data). :return: Return True if it is a root level activity, otherwise return False :rtype: bool
11,926
def update(self, friendly_name=values.unset, default_service_role_sid=values.unset, default_channel_role_sid=values.unset, default_channel_creator_role_sid=values.unset, read_status_enabled=values.unset, reachability_enabled=values.unset, typing_indicator_timeout=values.unset, consumption_report_interval=values.unset, notifications_new_message_enabled=values.unset, notifications_new_message_template=values.unset, notifications_new_message_sound=values.unset, notifications_new_message_badge_count_enabled=values.unset, notifications_added_to_channel_enabled=values.unset, notifications_added_to_channel_template=values.unset, notifications_added_to_channel_sound=values.unset, notifications_removed_from_channel_enabled=values.unset, notifications_removed_from_channel_template=values.unset, notifications_removed_from_channel_sound=values.unset, notifications_invited_to_channel_enabled=values.unset, notifications_invited_to_channel_template=values.unset, notifications_invited_to_channel_sound=values.unset, pre_webhook_url=values.unset, post_webhook_url=values.unset, webhook_method=values.unset, webhook_filters=values.unset, limits_channel_members=values.unset, limits_user_channels=values.unset, media_compatibility_message=values.unset, pre_webhook_retry_count=values.unset, post_webhook_retry_count=values.unset, notifications_log_enabled=values.unset): return self._proxy.update( friendly_name=friendly_name, default_service_role_sid=default_service_role_sid, default_channel_role_sid=default_channel_role_sid, default_channel_creator_role_sid=default_channel_creator_role_sid, read_status_enabled=read_status_enabled, reachability_enabled=reachability_enabled, typing_indicator_timeout=typing_indicator_timeout, consumption_report_interval=consumption_report_interval, notifications_new_message_enabled=notifications_new_message_enabled, notifications_new_message_template=notifications_new_message_template, notifications_new_message_sound=notifications_new_message_sound, notifications_new_message_badge_count_enabled=notifications_new_message_badge_count_enabled, notifications_added_to_channel_enabled=notifications_added_to_channel_enabled, notifications_added_to_channel_template=notifications_added_to_channel_template, notifications_added_to_channel_sound=notifications_added_to_channel_sound, notifications_removed_from_channel_enabled=notifications_removed_from_channel_enabled, notifications_removed_from_channel_template=notifications_removed_from_channel_template, notifications_removed_from_channel_sound=notifications_removed_from_channel_sound, notifications_invited_to_channel_enabled=notifications_invited_to_channel_enabled, notifications_invited_to_channel_template=notifications_invited_to_channel_template, notifications_invited_to_channel_sound=notifications_invited_to_channel_sound, pre_webhook_url=pre_webhook_url, post_webhook_url=post_webhook_url, webhook_method=webhook_method, webhook_filters=webhook_filters, limits_channel_members=limits_channel_members, limits_user_channels=limits_user_channels, media_compatibility_message=media_compatibility_message, pre_webhook_retry_count=pre_webhook_retry_count, post_webhook_retry_count=post_webhook_retry_count, notifications_log_enabled=notifications_log_enabled, )
Update the ServiceInstance :param unicode friendly_name: A string to describe the resource :param unicode default_service_role_sid: The service role assigned to users when they are added to the service :param unicode default_channel_role_sid: The channel role assigned to users when they are added to a channel :param unicode default_channel_creator_role_sid: The channel role assigned to a channel creator when they join a new channel :param bool read_status_enabled: Whether to enable the Message Consumption Horizon feature :param bool reachability_enabled: Whether to enable the Reachability Indicator feature for this Service instance :param unicode typing_indicator_timeout: How long in seconds to wait before assuming the user is no longer typing :param unicode consumption_report_interval: DEPRECATED :param bool notifications_new_message_enabled: Whether to send a notification when a new message is added to a channel :param unicode notifications_new_message_template: The template to use to create the notification text displayed when a new message is added to a channel :param unicode notifications_new_message_sound: The name of the sound to play when a new message is added to a channel :param bool notifications_new_message_badge_count_enabled: Whether the new message badge is enabled :param bool notifications_added_to_channel_enabled: Whether to send a notification when a member is added to a channel :param unicode notifications_added_to_channel_template: The template to use to create the notification text displayed when a member is added to a channel :param unicode notifications_added_to_channel_sound: The name of the sound to play when a member is added to a channel :param bool notifications_removed_from_channel_enabled: Whether to send a notification to a user when they are removed from a channel :param unicode notifications_removed_from_channel_template: The template to use to create the notification text displayed to a user when they are removed :param unicode notifications_removed_from_channel_sound: The name of the sound to play to a user when they are removed from a channel :param bool notifications_invited_to_channel_enabled: Whether to send a notification when a user is invited to a channel :param unicode notifications_invited_to_channel_template: The template to use to create the notification text displayed when a user is invited to a channel :param unicode notifications_invited_to_channel_sound: The name of the sound to play when a user is invited to a channel :param unicode pre_webhook_url: The webhook URL for pre-event webhooks :param unicode post_webhook_url: The URL for post-event webhooks :param unicode webhook_method: The HTTP method to use for both PRE and POST webhooks :param unicode webhook_filters: The list of WebHook events that are enabled for this Service instance :param unicode limits_channel_members: The maximum number of Members that can be added to Channels within this Service :param unicode limits_user_channels: The maximum number of Channels Users can be a Member of within this Service :param unicode media_compatibility_message: The message to send when a media message has no text :param unicode pre_webhook_retry_count: Count of times webhook will be retried in case of timeout or 429/503/504 HTTP responses :param unicode post_webhook_retry_count: The number of times calls to the `post_webhook_url` will be retried :param bool notifications_log_enabled: Whether to log notifications :returns: Updated ServiceInstance :rtype: twilio.rest.chat.v2.service.ServiceInstance
11,927
def _load_model(self):
    super()._load_model()
    self.mujoco_robot = Baxter()
    if self.has_gripper_right:
        self.gripper_right = gripper_factory(self.gripper_right_name)
        if not self.gripper_visualization:
            self.gripper_right.hide_visualization()
        self.mujoco_robot.add_gripper("right_hand", self.gripper_right)
    if self.has_gripper_left:
        self.gripper_left = gripper_factory(self.gripper_left_name)
        if not self.gripper_visualization:
            self.gripper_left.hide_visualization()
        self.mujoco_robot.add_gripper("left_hand", self.gripper_left)
Loads robot and optionally add grippers.
11,928
def get_default_cassandra_connection():
    for alias, conn in get_cassandra_connections():
        if conn.connection.default:
            return alias, conn
    return list(get_cassandra_connections())[0]
Return first default cassandra connection :return:
11,929
def _set_people(self, people):
    if hasattr(people, "object_type"):
        people = [people]
    elif hasattr(people, "__iter__"):
        people = list(people)
    return people
Sets who the object is sent to
11,930
def _simplify_arguments(arguments):
    if len(arguments.args) == 0:
        return arguments.kwargs
    elif len(arguments.kwargs) == 0:
        return arguments.args
    else:
        return arguments
If either the positional or the keyword arguments are empty, return only the non-empty part; otherwise return the arguments object unchanged.
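A quick illustration of the three branches, using a namedtuple as a stand-in for the arguments object; the real class comes from the surrounding library and is only assumed to expose .args and .kwargs.

from collections import namedtuple

Arguments = namedtuple('Arguments', ['args', 'kwargs'])  # stand-in type

print(_simplify_arguments(Arguments((), {'a': 1})))    # -> {'a': 1}
print(_simplify_arguments(Arguments((1, 2), {})))      # -> (1, 2)
print(_simplify_arguments(Arguments((1,), {'a': 1})))  # -> Arguments(args=(1,), kwargs={'a': 1})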
11,931
def add(self, new_results):
    for result in new_results:
        result.update(self.context)
        self.results = self.results.append(result, ignore_index=True)
Add new benchmark results.
11,932
def format_number_field(__, prec, number, locale):
    prec = NUMBER_DECIMAL_DIGITS if prec is None else int(prec)
    locale = Locale.parse(locale)
    pattern = locale.decimal_formats.get(None)
    return pattern.apply(number, locale, force_frac=(prec, prec))
Formats a number field.
11,933
def __ComputeEndByte(self, start, end=None, use_chunks=True):
    end_byte = end
    if start < 0 and not self.total_size:
        return end_byte
    if use_chunks:
        alternate = start + self.chunksize - 1
        if end_byte is not None:
            end_byte = min(end_byte, alternate)
        else:
            end_byte = alternate
    if self.total_size:
        alternate = self.total_size - 1
        if end_byte is not None:
            end_byte = min(end_byte, alternate)
        else:
            end_byte = alternate
    return end_byte
Compute the last byte to fetch for this request. This is all based on the HTTP spec for Range and Content-Range. Note that this is potentially confusing in several ways: * the value for the last byte is 0-based, eg "fetch 10 bytes from the beginning" would return 9 here. * if we have no information about size, and don't want to use the chunksize, we'll return None. See the tests for more examples. Args: start: byte to start at. end: (int or None, default: None) Suggested last byte. use_chunks: (bool, default: True) If False, ignore self.chunksize. Returns: Last byte to use in a Range header, or None.
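A standalone sketch of the end-byte rule described above, handy for seeing the chunk and total-size clamping in isolation; the function name and defaults are illustrative, not the library's API.

def compute_end_byte(start, end=None, chunksize=1024, total_size=None,
                     use_chunks=True):
    # Mirrors the logic above, with the instance state passed as arguments.
    end_byte = end
    if start < 0 and not total_size:
        return end_byte                    # size unknown: nothing to clamp against
    if use_chunks:
        alternate = start + chunksize - 1  # last byte of the current chunk
        end_byte = alternate if end_byte is None else min(end_byte, alternate)
    if total_size:
        alternate = total_size - 1         # last byte of the whole object
        end_byte = alternate if end_byte is None else min(end_byte, alternate)
    return end_byte

# "Fetch 10 bytes from the beginning" ends at byte index 9, as in the docstring.
assert compute_end_byte(0, end=9, chunksize=1024, total_size=100) == 9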
11,934
def run_ut_python3_qemu_internal(): pkg = glob.glob()[0] logging.info("=== NOW Running inside QEMU ===") logging.info("PIP Installing %s", pkg) check_call([, , , pkg]) logging.info("PIP Installing mxnet/test_requirements.txt") check_call([, , , , ]) logging.info("Running tests in mxnet/tests/python/unittest/") check_call([, , , , , , ])
this runs inside the vm
11,935
def do_status(self, arg):
    info = self.arm.get_info()
    max_len = len(max(info.keys(), key=len))
    print(self.style.theme())
    for key, value in info.items():
        print(self.style.help(key.ljust(max_len + 2), str(value)))
    print()
Print information about the arm.
11,936
def parse(self, buf: memoryview, params: Params) \
        -> Tuple[Command, memoryview]:
    try:
        tag, buf = Tag.parse(buf, params)
    except NotParseable as exc:
        return InvalidCommand(params, exc), buf[0:0]
    else:
        params = params.copy(tag=tag.value)
    cmd_parts: List[bytes] = []
    while True:
        try:
            _, buf = Space.parse(buf, params)
            atom, buf = Atom.parse(buf, params)
            cmd_parts.append(atom.value.upper())
        except NotParseable as exc:
            return InvalidCommand(params, exc), buf[0:0]
        command = b' '.join(cmd_parts)  # separator reconstructed; the original literal was lost
        cmd_type = self.commands.get(command)
        if not cmd_type:
            return InvalidCommand(params, None, command), buf[0:0]
        elif not cmd_type.compound:
            break
    params = params.copy(command_name=command)
    try:
        return cmd_type.parse(buf, params)
    except NotParseable as exc:
        return InvalidCommand(params, exc, command, cmd_type), buf[0:0]
Parse the given bytes into a command. The basic syntax is a tag string, a command name, possibly some arguments, and then an endline. If the command has a complete structure but cannot be parsed, an :class:`InvalidCommand` is returned. Args: buf: The bytes to parse. params: The parsing parameters.
11,937
def object_to_json(obj):
    if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
        return obj.isoformat()
    return str(obj)
Convert object that cannot be natively serialized by python to JSON representation.
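A hedged example of the usual way such a helper is wired in, as the default hook of json.dumps; the payload is made up for illustration.

import datetime
import json

payload = {'created': datetime.datetime(2020, 1, 1, 12, 30), 'count': 3}
print(json.dumps(payload, default=object_to_json))
# -> {"created": "2020-01-01T12:30:00", "count": 3}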
11,938
def multi_p_run(tot_num, _func, worker, params, n_process): from multiprocessing import Process, Queue out_q = Queue() procs = [] split_num = split_seq(list(range(0, tot_num)), n_process) print(tot_num, ">>", split_num) split_len = len(split_num) if n_process > split_len: n_process = split_len for i in range(n_process): _p = Process(target=_func, args=(worker, split_num[i][0], split_num[i][1], params, out_q)) _p.daemon = True procs.append(_p) _p.start() try: result = [] for i in range(n_process): result.append(out_q.get()) for i in procs: i.join() except KeyboardInterrupt: print() for i in procs: i.terminate() i.join() return -1 while not out_q.empty(): print(out_q.get(block=False)) return result
Run _func with multi-process using params.
11,939
def _get(self, uri):
    resp, resp_body = self.api.method_get(uri)
    return self.resource_class(self, resp_body, self.response_key, loaded=True)
Handles the communication with the API when getting a specific resource managed by this class.
11,940
def response(self):
    # The stripped keys and the 'ec2' client name were reconstructed from the
    # boto3 describe_instances API; the original literals were lost in extraction.
    describe_request_params = {}
    if self.filter is not None:
        if type(self.filter) is not dict:
            try:
                filters = json.loads(self.filter)
            except TypeError:
                filters = self._parse_cli_filters(self.filter)
        else:
            filters = self.filter
        describe_request_params['Filters'] = filters
    if self.vpc_ids is not None:
        if 'Filters' not in describe_request_params:
            describe_request_params['Filters'] = []
        describe_request_params['Filters'].append({
            'Name': 'vpc-id',
            'Values': self.vpc_ids.split(',')
        })
    reservations = self.session().client('ec2').describe_instances(**describe_request_params)
    return self._process_reservations(reservations)
Dictionary of public and private, hostnames and ips. :rtype: dict
11,941
def _LinearMapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): elements_data_size = self._data_type_definition.GetByteSize() self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size) try: struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:]) mapped_values = map(self._element_data_type_map.MapValue, struct_tuple) except Exception as exception: error_string = ( ).format( self._data_type_definition.name, byte_offset, exception) raise errors.MappingError(error_string) if context: context.byte_size = elements_data_size return tuple(mapped_values)
Maps a data type sequence on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
11,942
def codes2unicode(codes, composed=True):
    pua = u''.join(unichr(code) for code in codes)
    return translate(pua, composed=composed)
Convert Hanyang-PUA code iterable to Syllable-Initial-Peak-Final encoded unicode string. :param codes: an iterable of Hanyang-PUA code :param composed: the result should be composed as much as possible (default True) :return: Syllable-Initial-Peak-Final encoded unicode string
11,943
def write(self, s):
    try:
        self._write_lock.acquire()
        self.handle.sendall(s)
    except socket.timeout:
        self._connect()
    except socket.error:
        raise IOError
    finally:
        self._write_lock.release()
Write wrapper. Parameters ---------- s : bytes Bytes to write
11,944
def _write_current_buffer_for_group_key(self, key): write_info = self.write_buffer.pack_buffer(key) self.write(write_info.get(), self.write_buffer.grouping_info[key][]) self.write_buffer.clean_tmp_files(write_info) self.write_buffer.add_new_buffer_for_group(key)
Find the buffer for a given group key, prepare it to be written and writes it calling write() method.
11,945
def mode_string_v10(msg):
    if msg.autopilot == mavlink.MAV_AUTOPILOT_PX4:
        return interpret_px4_mode(msg.base_mode, msg.custom_mode)
    if not msg.base_mode & mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED:
        return "Mode(0x%08x)" % msg.base_mode
    if msg.type in [mavlink.MAV_TYPE_QUADROTOR, mavlink.MAV_TYPE_HEXAROTOR,
                    mavlink.MAV_TYPE_OCTOROTOR, mavlink.MAV_TYPE_TRICOPTER,
                    mavlink.MAV_TYPE_COAXIAL, mavlink.MAV_TYPE_HELICOPTER]:
        if msg.custom_mode in mode_mapping_acm:
            return mode_mapping_acm[msg.custom_mode]
    if msg.type == mavlink.MAV_TYPE_FIXED_WING:
        if msg.custom_mode in mode_mapping_apm:
            return mode_mapping_apm[msg.custom_mode]
    if msg.type == mavlink.MAV_TYPE_GROUND_ROVER:
        if msg.custom_mode in mode_mapping_rover:
            return mode_mapping_rover[msg.custom_mode]
    if msg.type == mavlink.MAV_TYPE_ANTENNA_TRACKER:
        if msg.custom_mode in mode_mapping_tracker:
            return mode_mapping_tracker[msg.custom_mode]
    return "Mode(%u)" % msg.custom_mode
mode string for 1.0 protocol, from heartbeat
11,946
def find_features(seqs, locus_tag="all", utr_len=200):
    found_features = []
    for seq_i in seqs:
        for feature in seq_i.features:
            # The stripped qualifier key is reconstructed as 'locus_tag',
            # matching the parameter name.
            if feature.type == "CDS" and (locus_tag == "all" or
                    ('locus_tag' in feature.qualifiers and
                     feature.qualifiers['locus_tag'][0] == locus_tag)):
                start = max(0, feature.location.nofuzzy_start - utr_len)
                stop = max(0, feature.location.nofuzzy_end + utr_len)
                feature_seq = seq_i.seq[start:stop]
                f_match = FeatureMatch(feature, feature_seq, feature.strand,
                                       utr_len)
                found_features.append(f_match)
    return found_features
Find features in sequences by locus tag
11,947
def ase(dbuser, dbpassword, args, gui): if dbuser == : dbpassword = db = CathubPostgreSQL(user=dbuser, password=dbpassword) db._connect() server_name = db.server_name subprocess.call( ("ase db {} {}".format(server_name, args)).split()) if gui: args = args.split()[0] subprocess.call( (.format(server_name, args)).split())
Connection to atomic structures on the Catalysis-Hub server with ase db cli. Arguments to the the ase db cli client must be enclosed in one string. For example: <cathub ase 'formula=Ag6In6H -s energy -L 200'>. To see possible ase db arguments run <ase db --help>
11,948
def update_domain_queues(self): for key in self.domain_config: final_key = "{name}:{domain}:queue".format( name=self.spider.name, domain=key) if final_key in self.queue_dict: self.queue_dict[final_key][0].window = float(self.domain_config[key][]) self.logger.debug("Updated queue {q} with new config" .format(q=final_key)) if in self.domain_config[key]: hits = int(self.domain_config[key][] * self.fit_scale( self.domain_config[key][])) self.queue_dict[final_key][0].limit = float(hits) else: self.queue_dict[final_key][0].limit = float(self.domain_config[key][])
Check to update existing queues already in memory new queues are created elsewhere
11,949
def search_mergedcell_value(xl_sheet, merged_range):
    for search_row_idx in range(merged_range[0], merged_range[1]):
        for search_col_idx in range(merged_range[2], merged_range[3]):
            if xl_sheet.cell(search_row_idx, search_col_idx).value:
                return xl_sheet.cell(search_row_idx, search_col_idx)
    return False
Search for a value in merged_range cells.
11,950
def _clear(self):
    ret = ([], [])
    for q in self.queues.values():
        pr = q._clear()
        ret[0].extend(pr[0])
        ret[1].extend(pr[1])
    self.totalSize = 0
    del self.prioritySet[:]
    if self.isWaited and self.canAppend():
        self.isWaited = False
        ret[0].append(QueueCanWriteEvent(self))
    if self.isWaitEmpty and not self:
        self.isWaitEmpty = False
        ret[1].append(QueueIsEmptyEvent(self))
    self.blockEvents.clear()
    return ret
Actual clear
11,951
def dataframe(self): if self._away_goals is None and self._home_goals is None: return None fields_to_include = { : self.arena, : self.attendance, : self.away_assists, : self.away_even_strength_assists, : self.away_even_strength_goals, : self.away_game_winning_goals, : self.away_goals, : self.away_penalties_in_minutes, : self.away_points, : self.away_power_play_assists, : self.away_power_play_goals, : self.away_save_percentage, : self.away_saves, : self.away_shooting_percentage, : self.away_short_handed_assists, : self.away_short_handed_goals, : self.away_shots_on_goal, : self.away_shutout, : self.date, : self.duration, : self.home_assists, : self.home_even_strength_assists, : self.home_even_strength_goals, : self.home_game_winning_goals, : self.home_goals, : self.home_penalties_in_minutes, : self.home_points, : self.home_power_play_assists, : self.home_power_play_goals, : self.home_save_percentage, : self.home_saves, : self.home_shooting_percentage, : self.home_short_handed_assists, : self.home_short_handed_goals, : self.home_shots_on_goal, : self.home_shutout, : self.losing_abbr, : self.losing_name, : self.time, : self.winner, : self.winning_abbr, : self.winning_name } return pd.DataFrame([fields_to_include], index=[self._uri])
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string URI that is used to instantiate the class, such as '201806070VEG'.
11,952
def GetByteSize(self):
    if not self.element_data_type_definition:
        return None
    if self.elements_data_size:
        return self.elements_data_size
    if not self.number_of_elements:
        return None
    element_byte_size = self.element_data_type_definition.GetByteSize()
    if not element_byte_size:
        return None
    return element_byte_size * self.number_of_elements
Retrieves the byte size of the data type definition. Returns: int: data type size in bytes or None if size cannot be determined.
11,953
def check_positive_flux(cls, kwargs_ps):
    pos_bool = True
    for kwargs in kwargs_ps:
        point_amp = kwargs['point_amp']  # key reconstructed; the original literal was lost
        for amp in point_amp:
            if amp < 0:
                pos_bool = False
                break
    return pos_bool
check whether inferred linear parameters are positive :param kwargs_ps: :return: bool
11,954
def load_genotypes(self): lb = self.chunk * Parser.chunk_stride + 2 ub = (self.chunk + 1) * Parser.chunk_stride + 2 buff = None self.current_file = self.archives[self.file_index] self.info_file = self.info_files[self.file_index] while buff is None: try: buff = self.parse_genotypes(lb, ub) except EOFError: buff = None if self.file_index < (len(self.archives) - 1): self.file_index += 1 self.chunk = 0 lb = self.chunk * Parser.chunk_stride + 2 ub = (self.chunk + 1) * Parser.chunk_stride + 2 self.current_file = self.archives[self.file_index] self.info_file = self.info_files[self.file_index] else: raise StopIteration self.dosages = numpy.transpose(buff) file = self.openfile(self.info_file) file.readline() lindex = 0 while lindex < lb - 2: file.readline() lindex += 1 self.markers = [] self.rsids = [] self.locus_count= 0 self.maf = [] self.alleles = [] self.rsquared = [] while lindex < (ub - 2): words = file.readline().strip().split() if len(words) > 0: loc, al2, al1, freq1, maf, avgcall,rsq = words[0:7] marker = [-1, lindex] if self.chrpos_encoding: marker = [int(x) for x in loc.split(":")[0:2]] if len(marker) < 2: raise libgwas.exceptions.MalformedInputFile("MACH .info"+ " file IDs must be in the format chrom:rsid") if len(marker) > 2: self.rsids.append(marker[2]) self.markers.append(marker[0:2]) else: self.markers.append(lindex) self.rsids.append(loc) self.maf.append(float(maf)) self.alleles.append([al1, al2]) self.rsquared.append(float(rsq)) lindex += 1 else: break if self.dosages.shape[0] != len(self.markers): print >> sys.stderr, "What is going on? I have ", \ self.dosages.shape[0], "dosages per individual and ", \ len(self.markers), self.markers self.chunk += 1 self.marker_count = len(self.markers)
Actually loads the first chunk of genotype data into memory due to \ the individual oriented format of MACH data. Due to the fragmented approach to data loading necessary to avoid running out of RAM, this function will initialize the data structures with the first chunk of loci and prepare it for otherwise normal iteration. Also, because the parser can be assigned more than one .gen file to read from, it will automatically move to the next file when the first is exhausted.
11,955
def _on_io_events(self, fd=None, _events=None): if fd not in self._connections: LOGGER.warning() return self._poll_connection(fd)
Invoked by Tornado's IOLoop when there are events for the fd :param int fd: The file descriptor for the event :param int _events: The events raised
11,956
def __get_overall_data(self, x): if isinstance(x, dict): if "sensorGenus" in x: if x["sensorGenus"] and x["sensorGenus"] not in self.lsts_tmp["genus"]: self.lsts_tmp["genus"].append(x["sensorGenus"]) if "sensorSpecies" in x: if x["sensorSpecies"] and x["sensorSpecies"] not in self.lsts_tmp["species"]: self.lsts_tmp["species"].append(x["sensorSpecies"]) if "archiveType" in x: if x["archiveType"] and x["archiveType"] not in self.lsts_tmp["archive"]: self.lsts_tmp["archive"].append(x["archiveType"]) if "QCnotes" in x: if x["QCnotes"] and x["QCnotes"] not in self.lsts_tmp["qc"]: self.lsts_tmp["qc"].append(x["QCnotes"]) for k, v in x.items(): if isinstance(v, dict): self.__get_overall_data(v) elif isinstance(v, list): self.__get_overall_data(v) elif isinstance(x, list): for i in x: self.__get_overall_data(i) return x
(recursive) Collect all "sensorGenus" and "sensorSpecies" fields, set data to self :param any x: Any data type :return none:
11,957
def set_sample_probability(probability): global _sample_probability if not 0.0 <= probability <= 1.0: raise ValueError() LOGGER.debug(, probability) _sample_probability = float(probability)
Set the probability that a batch will be submitted to the InfluxDB server. This should be a value that is greater than or equal to ``0`` and less than or equal to ``1.0``. A value of ``0.25`` would represent a probability of 25% that a batch would be written to InfluxDB. :param float probability: The value between 0 and 1.0 that represents the probability that a batch will be submitted to the InfluxDB server.
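A sketch of how a module-level probability like this is typically consumed when deciding whether to submit a batch; the _should_submit helper is an illustration, not part of the library above:

import random

_sample_probability = 0.25

def _should_submit():
    # With a probability of 0.25, roughly one batch in four is written.
    return _sample_probability > 0.0 and random.random() < _sample_probability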
11,958
def conv_gru(x, kernel_size, filters, padding="SAME", dilation_rate=(1, 1), name=None, reuse=None): def do_conv(args, name, bias_start, padding): return conv( args, filters, kernel_size, padding=padding, dilation_rate=dilation_rate, bias_initializer=tf.constant_initializer(bias_start), name=name) with tf.variable_scope( name, default_name="conv_gru", values=[x], reuse=reuse): reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding)) gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding)) candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding)) return gate * x + (1 - gate) * candidate
Convolutional GRU in 1 dimension.
11,959
def run_transaction(transactor, callback): if isinstance(transactor, sqlalchemy.engine.Connection): return _txn_retry_loop(transactor, callback) elif isinstance(transactor, sqlalchemy.engine.Engine): with transactor.connect() as connection: return _txn_retry_loop(connection, callback) elif isinstance(transactor, sqlalchemy.orm.sessionmaker): session = transactor(autocommit=True) return _txn_retry_loop(session, callback) else: raise TypeError("don't know how to run a transaction on %s", type(transactor))
Run a transaction with retries.

``callback()`` will be called with one argument to execute the transaction. ``callback`` may be called more than once; it should have no side effects other than writes to the database on the given connection. ``callback`` should not call ``commit()`` or ``rollback()``; these will be called automatically.

The ``transactor`` argument may be one of the following types:

* `sqlalchemy.engine.Connection`: the same connection is passed to the callback.
* `sqlalchemy.engine.Engine`: a connection is created and passed to the callback.
* `sqlalchemy.orm.sessionmaker`: a session is created and passed to the callback.
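A hedged usage sketch with an Engine; the connection URL, table, and statements are hypothetical, and the callback only issues writes while run_transaction handles commit, rollback, and retries:

import sqlalchemy

engine = sqlalchemy.create_engine("postgresql://user@localhost/bank")

def transfer(conn):
    # Both statements execute inside one retryable transaction.
    conn.execute(sqlalchemy.text("UPDATE accounts SET balance = balance - 100 WHERE id = 1"))
    conn.execute(sqlalchemy.text("UPDATE accounts SET balance = balance + 100 WHERE id = 2"))

run_transaction(engine, transfer)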
11,960
def escape_identifier(text, reg=KWD_RE): if not text: return "_" if text[0].isdigit(): text = "_" + text return reg.sub(r"\1_", text)
Escape partial C identifiers so they can be used as attributes/arguments
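A behaviour sketch; KWD_RE is not shown above, so a minimal anchored C-keyword pattern is assumed and passed explicitly:

import re

# Assumed stand-in for KWD_RE: a capture group over a C keyword occupying the whole string.
KWD_RE = re.compile(r"^(auto|break|case|char|const|continue|default|do|double|else|enum|"
                    r"extern|float|for|goto|if|int|long|register|return|short|signed|"
                    r"sizeof|static|struct|switch|typedef|union|unsigned|void|volatile|while)$")

print(escape_identifier("for", reg=KWD_RE))   # 'for_'  (keyword gets a trailing underscore)
print(escape_identifier("2nd", reg=KWD_RE))   # '_2nd'  (leading digit gets a prefix)
print(escape_identifier("", reg=KWD_RE))      # '_'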
11,961
def get(self): click_tracking = {} if self.enable is not None: click_tracking["enable"] = self.enable if self.enable_text is not None: click_tracking["enable_text"] = self.enable_text return click_tracking
Get a JSON-ready representation of this ClickTracking. :returns: This ClickTracking, ready for use in a request body. :rtype: dict
11,962
def iter_successors(self, graph, orig, branch, turn, tick, *, forward=None): if self.db._no_kc: yield from self._adds_dels_sucpred(self.successors[graph, orig], branch, turn, tick)[0] return if forward is None: forward = self.db._forward yield from self._get_destcache(graph, orig, branch, turn, tick, forward=forward)
Iterate over successors of a given origin node at a given time.
11,963
def to_vector(np_array): if len(np_array.shape) == 1: return Vectors.dense(np_array) else: raise Exception("An MLLib Vector can only be created from a one-dimensional " + "numpy array, got {}".format(len(np_array.shape)))
Convert numpy array to MLlib Vector
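Usage sketch (requires a pyspark installation for the Vectors import used by the helper):

import numpy as np

print(to_vector(np.array([0.1, 0.2, 0.3])))   # DenseVector([0.1, 0.2, 0.3])
try:
    to_vector(np.zeros((2, 2)))               # 2-D input is rejected
except Exception as exc:
    print(exc)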
11,964
def nextindx(self): indx = 0 with s_lmdbslab.Scan(self.slab, self.db) as curs: last_key = curs.last_key() if last_key is not None: indx = s_common.int64un(last_key) + 1 return indx
Determine the next insert offset according to storage. Returns: int: The next insert offset.
11,965
def load_config(self, config=None): cfgname = (config or self.config_name) cfgname = if cfgname is None else cfgname assert isinstance(cfgname, six.string_types), config_name = cfgname if cfgname.endswith() else .format(cfgname) self.configfile = os.path.join(self.treedir, , config_name) assert os.path.isfile(self.configfile) is True, .format(self.configfile) self._cfg = SafeConfigParser() try: self._cfg.read(self.configfile.decode()) except AttributeError: self._cfg.read(self.configfile) self.environ = OrderedDict() self.environ[] = self._cfg.defaults() self._file_replace = if self.environ[][] == self._file_replace: self.environ[][] = self.sasbasedir
loads a config file Parameters: config (str): Optional name of manual config file to load
11,966
def cleanUpdatesList(self, col, cellIdx, seg): for key, updateList in self.segmentUpdates.iteritems(): c,i = key[0], key[1] if c == col and i == cellIdx: for update in updateList: if update[1].segment == seg: self.removeSegmentUpdate(update)
Removes any update that would be for the given col, cellIdx, segIdx. NOTE: logically, we need to do this when we delete segments, so that if an update refers to a segment that was just deleted, we also remove that update from the update list. However, I haven't seen it trigger in any of the unit tests yet, so it might mean that it's not needed and that situation doesn't occur, by construction. todo: check if that situation occurs.
11,967
def api(server, command, *args, **kwargs): ["MyGroup", "Description"] if in kwargs: arguments = kwargs[] else: arguments = args call = .format(command, arguments) try: client, key = _get_session(server) except Exception as exc: err_msg = .format(server, exc) log.error(err_msg) return {call: err_msg} namespace, method = command.split() endpoint = getattr(getattr(client, namespace), method) try: output = endpoint(key, *arguments) except Exception as e: output = .format(e) return {call: output}
Call the Spacewalk xmlrpc api. CLI Example: .. code-block:: bash salt-run spacewalk.api spacewalk01.domain.com systemgroup.create MyGroup Description salt-run spacewalk.api spacewalk01.domain.com systemgroup.create arguments='["MyGroup", "Description"]' State Example: .. code-block:: yaml create_group: salt.runner: - name: spacewalk.api - server: spacewalk01.domain.com - command: systemgroup.create - arguments: - MyGroup - Description
11,968
def get_issuer(request): if isinstance(request, etree._Element): elem = request else: if isinstance(request, Document): request = request.toxml() elem = fromstring(request, forbid_dtd=True) issuer = None issuer_nodes = OneLogin_Saml2_Utils.query(elem, ) if len(issuer_nodes) == 1: issuer = OneLogin_Saml2_Utils.element_text(issuer_nodes[0]) return issuer
Gets the Issuer of the Logout Request Message :param request: Logout Request Message :type request: string|DOMDocument :return: The Issuer :rtype: string
11,969
def calibrate(filename): params = calibration_to(filename) with nc.loader(filename) as root: for key, value in params.items(): nc.getdim(root, , 1) nc.getdim(root, , 1) if isinstance(value, list): for i in range(len(value)): nc.getvar(root, % (key, i), , (, , ))[:] = value[i] else: nc.getvar(root, key, , (, , ))[:] = value
Append the calibration parameters as variables of the netcdf file. Keyword arguments: filename -- the name of a netcdf file.
11,970
def add_nodes_from(self, nodes, weights=None): nodes = list(nodes) if weights: if len(nodes) != len(weights): raise ValueError("The number of elements in nodes and weights" "should be equal.") for index in range(len(nodes)): self.add_node(node=nodes[index], weight=weights[index]) else: for node in nodes: self.add_node(node=node)
Add multiple nodes to the Graph. **The behaviour of adding weights is different from networkx.**

Parameters
----------
nodes: iterable container
    A container of nodes (list, dict, set, or any hashable python object).
weights: list, tuple (default=None)
    A container of weights (int, float). The weight value at index i is associated with the variable at index i.

Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_nodes_from(nodes=['A', 'B', 'C'])
>>> sorted(G.nodes())
['A', 'B', 'C']

Adding nodes with weights:

>>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
>>> G.node['D']
{'weight': 0.3}
>>> G.node['E']
{'weight': 0.6}
>>> G.node['A']
{'weight': None}
11,971
def correlate(h1, h2): r h1, h2 = __prepare_histogram(h1, h2) h1m = h1 - scipy.sum(h1) / float(h1.size) h2m = h2 - scipy.sum(h2) / float(h2.size) a = scipy.sum(scipy.multiply(h1m, h2m)) b = math.sqrt(scipy.sum(scipy.square(h1m)) * scipy.sum(scipy.square(h2m))) return 0 if 0 == b else a / b
r""" Correlation between two histograms. The histogram correlation between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_{corr}(H, H') = \frac{ \sum_{m=1}^M (H_m-\bar{H}) \cdot (H'_m-\bar{H'}) }{ \sqrt{\sum_{m=1}^M (H_m-\bar{H})^2 \cdot \sum_{m=1}^M (H'_m-\bar{H'})^2} } with :math:`\bar{H}` and :math:`\bar{H'}` being the mean values of :math:`H` resp. :math:`H'` *Attributes:* - not a metric, a similarity *Attributes for normalized histograms:* - :math:`d(H, H')\in[-1, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[-1, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. Returns ------- correlate : float Correlation between the histograms. Notes ----- Returns 0 if one of h1 or h2 contain only zeros.
11,972
def update_execution_state_kernel(self): client = self.get_current_client() if client is not None: executing = client.stop_button.isEnabled() self.interrupt_action.setEnabled(executing)
Update actions following the execution state of the kernel.
11,973
def update(self, dict_name, mapping=None, priorities=None, expire=None, locks=None): lockslocks if self._session_lock_identifier is None: raise ProgrammerError() if priorities is None: priorities = defaultdict(int) if locks is None: locks = defaultdict(lambda: ) if not (expire is None or isinstance(expire, int)): raise ProgrammerError() conn = redis.Redis(connection_pool=self.pool) script = conn.register_script(j:"") dict_name = self._namespace(dict_name) if mapping is None: mapping = {} items = [] for key, value in mapping.iteritems(): items.append(self._encode(key)) items.append(self._encode(value)) items.append(priorities[key]) items.append(self._encode(locks[key])) res = script(keys=[self._lock_name, dict_name, dict_name + , dict_name + ], args=[self._session_lock_identifier, expire] + items) if res[0] == 0: raise EnvironmentError( % dict_name) elif res[0] == -1: raise EnvironmentError( % (self._decode(res[1]), res[2], res[3], dict_name))
Add mapping to a dictionary, replacing previous values.

Can be called with only dict_name and expire to refresh the expiration time. NB: locks are only enforced if present, so nothing prevents another caller from coming in and modifying data without using locks.

:param mapping: a dict of keys and values to update in dict_name. Must be specified if priorities is specified.
:param priorities: a dict with the same keys as those in mapping that provides a numerical value indicating the priority to assign to that key. Default sets 0 for all keys.
:param int expire: if specified, then dict_name will be set to expire in that many seconds.
:param locks: a dict with the same keys as those in the mapping. Before making any particular update, this function checks if a key is present in a 'locks' table for this dict, and if so, then its value must match the value provided in the input locks dict for that key. If not, then the value provided in the locks dict is inserted into the 'locks' table. If the locks parameter is None, then no lock checking is performed.
11,974
def get_distribute_verbatim_metadata(self): metadata = dict(self._mdata[]) metadata.update({: self._my_map[]}) return Metadata(**metadata)
Gets the metadata for the distribute verbatim rights flag. return: (osid.Metadata) - metadata for the distribution rights fields *compliance: mandatory -- This method must be implemented.*
11,975
def add(self, entry):
    if self.os is None:
        import os
        self.os = os
    nm = entry[0]
    pth = entry[1]
    pynm, ext = self.os.path.splitext(self.os.path.basename(pth))
    ispkg = pynm == '__init__'
    assert ext in ('.pyc', '.pyo')
    self.toc[nm] = (ispkg, self.lib.tell())
    f = open(entry[1], 'rb')
    f.seek(8)  # skip the compiled module's magic number and timestamp header
    self.lib.write(f.read())
Override this to influence the mechanics of the Archive. Assumes entry is a seq beginning with (nm, pth, ...) where nm is the key by which we'll be asked for the object. pth is the name of where we find the object. Overrides of get_obj_from can make use of further elements in entry.
11,976
def create(self, handle, title=None, description=None): role = Role(handle=handle, title=title, description=description) schema = RoleSchema() valid = schema.process(role) if not valid: return valid db.session.add(role) db.session.commit() events.role_created_event.send(role) return role
Create a role
11,977
def getJobStatus(self, workers): jobInfo = self.JobStatus(self.__nupicJobID, workers) return jobInfo
Parameters: ---------------------------------------------------------------------- workers: If this job was launched outside of the nupic job engine, then this is an array of subprocess Popen instances, one for each worker retval: _NupicJob.JobStatus instance
11,978
def _quote_username(name): if not isinstance(name, six.string_types): return str(name) else: return salt.utils.stringutils.to_str(name)
Usernames can only contain ascii chars, so make sure we return a str type
11,979
def storage_type(self): nf = np.load(str(self.path), mmap_mode="c", allow_pickle=False) if np.iscomplexobj(nf): st = "field" else: st = "phase" return st
Depending on input data type, the storage type is either "field" (complex) or "phase" (real).
11,980
def cancel_job( self, project_id, region, job_id, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): if "cancel_job" not in self._inner_api_calls: self._inner_api_calls[ "cancel_job" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.cancel_job, default_retry=self._method_configs["CancelJob"].retry, default_timeout=self._method_configs["CancelJob"].timeout, client_info=self._client_info, ) request = jobs_pb2.CancelJobRequest( project_id=project_id, region=region, job_id=job_id ) return self._inner_api_calls["cancel_job"]( request, retry=retry, timeout=timeout, metadata=metadata )
Starts a job cancellation request. To access the job resource after cancellation, call `regions/{region}/jobs.list <https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list>`__ or `regions/{region}/jobs.get <https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get>`__. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.JobControllerClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> >>> response = client.cancel_job(project_id, region, job_id) Args: project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. job_id (str): Required. The job ID. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dataproc_v1beta2.types.Job` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
11,981
def volume(self): volume = ((np.pi * self.primitive.radius ** 2) * self.primitive.height) return volume
The analytic volume of the cylinder primitive. Returns --------- volume : float Volume of the cylinder
11,982
def cli(ctx, board, fpga, pack, type, size, project_dir, verbose, verbose_yosys, verbose_arachne): exit_code = SCons(project_dir).time({ : board, : fpga, : size, : type, : pack, : { : verbose, : verbose_yosys, : verbose_arachne } }) ctx.exit(exit_code)
Bitstream timing analysis.
11,983
def autoLayout(self): try: direction = self.currentSlide().scene().direction() except AttributeError: direction = QtGui.QBoxLayout.TopToBottom size = self.size() self._slideshow.resize(size) prev = self._previousButton next = self._nextButton if direction == QtGui.QBoxLayout.BottomToTop: y = 9 else: y = size.height() - prev.height() - 9 prev.move(9, y) next.move(size.width() - next.width() - 9, y) for i in range(self._slideshow.count()): widget = self._slideshow.widget(i) widget.scene().autoLayout(size)
Automatically lays out the contents for this widget.
11,984
def fileopenbox(msg=None , title=None , default="*" , filetypes=None ): if sys.platform == : _bring_to_front() localRoot = Tk() localRoot.withdraw() initialbase, initialfile, initialdir, filetypes = fileboxSetup(default,filetypes) if (initialfile.find("*") < 0) and (initialfile.find("?") < 0): initialfile = None elif initialbase == "*": initialfile = None f = tk_FileDialog.askopenfilename(parent=localRoot , title=getFileDialogTitle(msg,title) , initialdir=initialdir , initialfile=initialfile , filetypes=filetypes ) localRoot.destroy() if not f: return None return os.path.normpath(f)
A dialog to get a file name. About the "default" argument ============================ The "default" argument specifies a filepath that (normally) contains one or more wildcards. fileopenbox will display only files that match the default filepath. If omitted, defaults to "*" (all files in the current directory). WINDOWS EXAMPLE:: ...default="c:/myjunk/*.py" will open in directory c:\myjunk\ and show all Python files. WINDOWS EXAMPLE:: ...default="c:/myjunk/test*.py" will open in directory c:\myjunk\ and show all Python files whose names begin with "test". Note that on Windows, fileopenbox automatically changes the path separator to the Windows path separator (backslash). About the "filetypes" argument ============================== If specified, it should contain a list of items, where each item is either:: - a string containing a filemask # e.g. "*.txt" - a list of strings, where all of the strings except the last one are filemasks (each beginning with "*.", such as "*.txt" for text files, "*.py" for Python files, etc.). and the last string contains a filetype description EXAMPLE:: filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ] NOTE THAT ========= If the filetypes list does not contain ("All files","*"), it will be added. If the filetypes list does not contain a filemask that includes the extension of the "default" argument, it will be added. For example, if default="*abc.py" and no filetypes argument was specified, then "*.py" will automatically be added to the filetypes argument. @rtype: string or None @return: the name of a file, or None if user chose to cancel @arg msg: the msg to be displayed. @arg title: the window title @arg default: filepath with wildcards @arg filetypes: filemasks that a user can choose, e.g. "*.txt"
11,985
def _parse_accented_syllable(unparsed_syllable): if unparsed_syllable[0] == : return unparsed_syllable[1:], for character in unparsed_syllable: if character in _ACCENTED_VOWELS: vowel, tone = _accented_vowel_to_numbered(character) return unparsed_syllable.replace(character, vowel), tone return unparsed_syllable,
Return the syllable and tone of an accented Pinyin syllable. Any accented vowels are returned without their accents. Implements the following algorithm: 1. If the syllable has an accent mark, convert that vowel to a regular vowel and add the tone to the end of the syllable. 2. Otherwise, assume the syllable is tone 5 (no accent marks).
11,986
def get_last_rconfiguration_id(topic_id, remoteci_id, db_conn=None): db_conn = db_conn or flask.g.db_conn __TABLE = models.JOBS query = sql.select([__TABLE.c.rconfiguration_id]). \ order_by(sql.desc(__TABLE.c.created_at)). \ where(sql.and_(__TABLE.c.topic_id == topic_id, __TABLE.c.remoteci_id == remoteci_id)). \ limit(1) rconfiguration_id = db_conn.execute(query).fetchone() if rconfiguration_id is not None: return str(rconfiguration_id[0]) else: return None
Get the rconfiguration_id of the last job run by the remoteci. :param topic_id: the topic :param remoteci_id: the remoteci id :return: last rconfiguration_id of the remoteci
11,987
def string_format(data, out='nested', opts=None, **kwargs):
    if not opts:
        opts = __opts__
    return salt.output.string_format(data, out, opts=opts, **kwargs)
Return the outputter formatted string, removing the ANSI escape sequences. data The JSON serializable object. out: ``nested`` The name of the output to use to transform the data. Default: ``nested``. opts Dictionary of configuration options. Default: ``__opts__``. kwargs Arguments to sent to the outputter module. CLI Example: .. code-block:: bash salt '*' out.string_format "{'key': 'value'}" out=table
11,988
def _get_broadcast_shape(shape1, shape2):
    if shape1 == shape2:
        return shape1
    length1 = len(shape1)
    length2 = len(shape2)
    if length1 > length2:
        shape = list(shape1)
    else:
        shape = list(shape2)
    i = max(length1, length2) - 1
    for a, b in zip(shape1[::-1], shape2[::-1]):
        if a != 1 and b != 1 and a != b:
            raise ValueError('shape1=%s is not broadcastable to shape2=%s' % (shape1, shape2))
        shape[i] = max(a, b)
        i -= 1
    return tuple(shape)
Given two shapes that are not identical, find the shape that both input shapes can broadcast to.
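Behaviour sketch; NumPy's broadcasting rule yields the same answers, which makes it a handy cross-check:

import numpy as np

print(_get_broadcast_shape((3, 1, 5), (4, 5)))                        # (3, 4, 5)
print(np.broadcast(np.empty((3, 1, 5)), np.empty((4, 5))).shape)      # (3, 4, 5)
try:
    _get_broadcast_shape((2, 3), (4, 3))                              # 2 vs 4 cannot broadcast
except ValueError as exc:
    print(exc)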
11,989
def small_integer(anon, obj, field, val): return anon.faker.small_integer(field=field)
Returns a random small integer (for a Django SmallIntegerField)
11,990
def set_connection_params(self, ip_address, tsap_snap7, tsap_logo):
    assert re.match(ipv4, ip_address), '%s is invalid ipv4' % ip_address
    result = self.library.Cli_SetConnectionParams(self.pointer, ip_address.encode(),
                                                  c_uint16(tsap_snap7),
                                                  c_uint16(tsap_logo))
    if result != 0:
        raise Snap7Exception("The parameter was invalid")
Sets internally (IP, LocalTSAP, RemoteTSAP) Coordinates. This function must be called just before Cli_Connect(). :param ip_address: IP ip_address of server :param tsap_snap7: TSAP SNAP7 Client (e.g. 10.00 = 0x1000) :param tsap_logo: TSAP Logo Server (e.g. 20.00 = 0x2000)
11,991
def compose_path(pub, uuid_url=False): if uuid_url: return join( "/", UUID_DOWNLOAD_KEY, str(pub.uuid) ) return join( "/", DOWNLOAD_KEY, basename(pub.file_pointer), basename(pub.filename) )
Compose absolute path for given `pub`.

Args:
    pub (obj): :class:`.DBPublication` instance.
    uuid_url (bool, default False): Compose URL using UUID.

Returns:
    str: Absolute url-path of the publication, without server's address and protocol.

Raises:
    PrivatePublicationError: When the `pub` is a private publication.
11,992
def all(cls, include_deactivated=False):
    if include_deactivated:
        resources = yield cls.view.get(include_docs=True)
    else:
        resources = yield cls.active_view.get(include_docs=True)

    raise Return([cls(**resource['doc']) for resource in resources['rows']])
Get all resources :param include_deactivated: Include deactivated resources in response :returns: list of Document instances :raises: SocketError, CouchException
11,993
def isometric_build_atlased_mesh(script, BorderSize=0.1): filter_xml = .join([ , , % BorderSize, , , , , , , ]) util.write_filter(script, filter_xml) return None
Isometric parameterization: Build Atlased Mesh This actually generates the UV mapping from the isometric parameterization
11,994
def postorder(self, node=None):
    if node is None:
        node = self.ast

    try:
        first = iter(node)
    except TypeError:
        first = None

    if first:
        for kid in node:
            self.postorder(kid)

    try:
        name = 'n_' + self.typestring(node)
        if hasattr(self, name):
            func = getattr(self, name)
            func(node)
        else:
            self.default(node)
    except GenericASTTraversalPruningException:
        return

    name = name + '_exit'
    if hasattr(self, name):
        func = getattr(self, name)
        func(node)
Walk the tree in roughly 'postorder' (a bit of a lie explained below). For each node with typestring name *name*, if the node has a method called n_*name*, call that before walking children. If there is no such method defined, call self.default(node) instead. Subclasses of GenericASTTraversal will probably want to override this method. If the node has a method called *name*_exit, that is called after all children have been called. So in this sense this function is a lie. In typical use a node with children can call "postorder" in any order it wants, which may skip children or order them in ways other than first to last. In fact, this happens.
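A hedged sketch of the dispatch convention the docstring describes: a walker subclass supplies n_<name> and <name>_exit methods, and anything without one falls through to default(). Node and Walker below are toy stand-ins, not the real traversal classes:

class Node(list):
    def __init__(self, kind, children=()):
        super().__init__(children)
        self.kind = kind

class Walker:
    def postorder(self, node):
        for child in node:                 # visit children first
            self.postorder(child)
        getattr(self, 'n_' + node.kind, self.default)(node)
        exit_hook = getattr(self, node.kind + '_exit', None)
        if exit_hook:
            exit_hook(node)

    def default(self, node):
        print('default:', node.kind)

    def n_binop(self, node):
        print('binop with', len(node), 'children')

tree = Node('binop', [Node('num'), Node('num')])
Walker().postorder(tree)   # default: num / default: num / binop with 2 children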
11,995
def get_event_log(self, object_id): content = self._fetch("/event_log/%s" % object_id, method="GET") return FastlyEventLog(self, content)
Get the specified event log.
11,996
def get_action_group_names(self): return self.get_group_names( list(itertools.chain( *[self._get_array(), self._get_array(), self._get_array()])))
Return all the security group names configured in this action.
11,997
def qImageToArray(qimage, dtype = ): result_shape = (qimage.height(), qimage.width()) temp_shape = (qimage.height(), qimage.bytesPerLine() * 8 // qimage.depth()) if qimage.format() in (QtGui.QImage.Format_ARGB32_Premultiplied, QtGui.QImage.Format_ARGB32, QtGui.QImage.Format_RGB32): if dtype == : dtype = np.dtype({: (np.uint8, 0), : (np.uint8, 1), : (np.uint8, 2), : (np.uint8, 3)}) elif dtype == : dtype = np.uint8 result_shape += (4, ) temp_shape += (4, ) elif qimage.format() == QtGui.QImage.Format_Indexed8: dtype = np.uint8 else: raise ValueError("qimage2numpy only supports 32bit and 8bit images") buf = qimage.bits().asstring(qimage.byteCount()) result = np.frombuffer(buf, dtype).reshape(temp_shape) if result_shape != temp_shape: result = result[:,:result_shape[1]] if qimage.format() == QtGui.QImage.Format_RGB32 and dtype == np.uint8: result = result[...,:3] result = result[...,::-1] return result
Convert QImage to numpy.ndarray. The dtype defaults to uint8 for QImage.Format_Indexed8 or `bgra_dtype` (i.e. a record array) for 32bit color images. You can pass a different dtype to use, or 'array' to get a 3D uint8 array for color images.
11,998
def _inject_target(self, target_adaptor): target_cls = self._target_types[target_adaptor.type_alias] declared_deps = target_adaptor.dependencies implicit_deps = (Address.parse(s, relative_to=target_adaptor.address.spec_path, subproject_roots=self._address_mapper.subproject_roots) for s in target_cls.compute_dependency_specs(kwargs=target_adaptor.kwargs())) for dep in declared_deps: self._dependent_address_map[dep].add(target_adaptor.address) for dep in implicit_deps: self._implicit_dependent_address_map[dep].add(target_adaptor.address)
Inject a target, respecting all sources of dependencies.
11,999
def trace2(A, B): r A = asarray(A, float) B = asarray(B, float) layout_error = "Wrong matrix layout." if not (len(A.shape) == 2 and len(B.shape) == 2): raise ValueError(layout_error) if not (A.shape[1] == B.shape[0] and A.shape[0] == B.shape[1]): raise ValueError(layout_error) return _sum(A.T * B)
r"""Trace of :math:`\mathrm A \mathrm B^\intercal`. Args: A (array_like): Left-hand side. B (array_like): Right-hand side. Returns: float: Trace of :math:`\mathrm A \mathrm B^\intercal`.