language (stringclasses, 2 values)
func_code_string (stringlengths, 63 to 466k)
python
def merge_all_edges_between_two_vertices(self, vertex1, vertex2):
    """ Merges all edges between two supplied vertices into a single edge from a perspective of multi-color merging.

    Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__merge_all_bgedges_between_two_vertices`

    :param vertex1: a first out of two vertices edges between which are to be merged together
    :type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
    :param vertex2: a second out of two vertices edges between which are to be merged together
    :type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
    :return: ``None``, performs inplace changes
    """
    self.__merge_all_bgedges_between_two_vertices(vertex1=vertex1, vertex2=vertex2)
python
def satoshi_to_currency(num, currency):
    """Converts a given number of satoshi to another currency as a formatted
    string rounded down to the proper number of decimal places.

    :param num: The number of satoshi.
    :type num: ``int``
    :param currency: One of the :ref:`supported currencies`.
    :type currency: ``str``
    :rtype: ``str``
    """
    return '{:f}'.format(
        Decimal(
            num / Decimal(EXCHANGE_RATES[currency]())
        ).quantize(
            Decimal('0.' + '0' * CURRENCY_PRECISION[currency]),
            rounding=ROUND_DOWN
        ).normalize()
    )
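A minimal usage sketch for the converter above; EXCHANGE_RATES (callables returning satoshi per currency unit) and CURRENCY_PRECISION (decimal places) are hypothetical stand-ins for whatever tables the real module defines:

from decimal import Decimal, ROUND_DOWN

EXCHANGE_RATES = {'usd': lambda: 2500}   # made-up rate: 2500 satoshi per 1 USD
CURRENCY_PRECISION = {'usd': 2}

print(satoshi_to_currency(123456, 'usd'))  # -> '49.38' (123456 / 2500, rounded down to 2 places)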
python
def untrace_property(cls, accessor):
    """
    Untraces given class property.

    :param cls: Class of the property.
    :type cls: object
    :param accessor: Property to untrace.
    :type accessor: property
    :return: Definition success.
    :rtype: bool
    """
    if not is_traced(accessor.fget) or not is_traced(accessor.fset) or not is_traced(accessor.fdel):
        return False

    name = get_method_name(accessor)
    setattr(cls, name, property(untracer(accessor.fget),
                                untracer(accessor.fset),
                                untracer(accessor.fdel)))
    return True
java
public ValueList appendStringSet(String... val) {
    super.append(new LinkedHashSet<String>(Arrays.asList(val)));
    return this;
}
python
def _get_response_ms(self):
    """
    Get the duration of the request/response cycle in milliseconds.
    In case of negative duration 0 is returned.
    """
    response_timedelta = now() - self.log['requested_at']
    response_ms = int(response_timedelta.total_seconds() * 1000)
    return max(response_ms, 0)
python
def QueueQueryTasks(self, queue, limit=1):
    """Retrieves tasks from a queue without leasing them.

    This is good for a read only snapshot of the tasks.

    Args:
        queue: The task queue that this task belongs to, usually
            client.Queue() where client is the ClientURN object you want to
            schedule msgs on.
        limit: Number of values to fetch.

    Returns:
        A list of Task() objects.
    """
    prefix = DataStore.QUEUE_TASK_PREDICATE_PREFIX
    all_tasks = []

    for _, serialized, ts in self.ResolvePrefix(
            queue, prefix, timestamp=DataStore.ALL_TIMESTAMPS):
        task = rdf_flows.GrrMessage.FromSerializedString(serialized)
        task.leased_until = ts
        all_tasks.append(task)

    return all_tasks[:limit]
python
def _tfidf(x, reduced_term_freq, vocab_size, corpus_size):
    """Maps the terms in x to their (1/doc_length) * inverse document frequency.

    Args:
        x: A `Column` representing int64 values (most likely that are the
            result of calling string_to_int on a tokenized string).
        reduced_term_freq: A dense tensor of shape (vocab_size,) that
            represents the count of the number of documents with each term.
            So vocab token i (which is an int) occurs in reduced_term_freq[i]
            examples in the corpus. This means reduced_term_freq should have
            a count for out-of-vocab tokens.
        vocab_size: An int - the count of vocab used to turn the string into
            int64s, including any out-of-vocab ids.
        corpus_size: A scalar count of the number of documents in the corpus.

    Returns:
        A `Column` where each int value is mapped to a double equal to
        (1 if that term appears in that row, 0 otherwise / the number of
        terms in that row) * the log of (the number of rows in `x` /
        (1 + the number of rows in `x` where the term appears at least once)).

    NOTE: This is intended to be used with the feature_column 'sum' combiner
    to arrive at the true term frequencies.
    """
    def _map_to_vocab_range(x):
        """Enforces that the vocab_ids in x are positive."""
        return tf.SparseTensor(
            indices=x.indices,
            values=tf.mod(x.values, vocab_size),
            dense_shape=x.dense_shape)

    def _map_to_tfidf(x):
        """Calculates the inverse document frequency of terms in the corpus.

        Args:
            x: a SparseTensor of int64 representing string indices in vocab.

        Returns:
            The tf*idf values
        """
        # Add one to the reduced term frequencies to avoid dividing by zero.
        idf = tf.log(tf.to_double(corpus_size) / (
            1.0 + tf.to_double(reduced_term_freq)))
        dense_doc_sizes = tf.to_double(tf.sparse_reduce_sum(tf.SparseTensor(
            indices=x.indices,
            values=tf.ones_like(x.values),
            dense_shape=x.dense_shape), 1))
        # For every term in x, divide the idf by the doc size.
        # The two gathers both result in shape <sum_doc_sizes>
        idf_over_doc_size = (tf.gather(idf, x.values) /
                             tf.gather(dense_doc_sizes, x.indices[:, 0]))
        return tf.SparseTensor(
            indices=x.indices,
            values=idf_over_doc_size,
            dense_shape=x.dense_shape)

    cleaned_input = _map_to_vocab_range(x)
    weights = _map_to_tfidf(cleaned_input)
    return tf.to_float(weights)
java
public static CommercePriceList fetchByCompanyId_Last(long companyId,
        OrderByComparator<CommercePriceList> orderByComparator) {
    return getPersistence()
               .fetchByCompanyId_Last(companyId, orderByComparator);
}
python
def _get_public_room(self, room_name, invitees: List[User]):
    """ Obtain a public, canonically named (if possible) room and invite peers """
    room_name_full = f'#{room_name}:{self._server_name}'
    invitees_uids = [user.user_id for user in invitees]

    for _ in range(JOIN_RETRIES):
        # try joining room
        try:
            room = self._client.join_room(room_name_full)
        except MatrixRequestError as error:
            if error.code == 404:
                self.log.debug(
                    'No room for peer, trying to create',
                    room_name=room_name_full,
                    error=error,
                )
            else:
                self.log.debug(
                    'Error joining room',
                    room_name=room_name,
                    error=error.content,
                    error_code=error.code,
                )
        else:
            # Invite users to existing room
            member_ids = {user.user_id for user in room.get_joined_members(force_resync=True)}
            users_to_invite = set(invitees_uids) - member_ids
            self.log.debug('Inviting users', room=room, invitee_ids=users_to_invite)
            for invitee_id in users_to_invite:
                room.invite_user(invitee_id)
            self.log.debug('Room joined successfully', room=room)
            break

        # if can't, try creating it
        try:
            room = self._client.create_room(
                room_name,
                invitees=invitees_uids,
                is_public=True,
            )
        except MatrixRequestError as error:
            if error.code == 409:
                msg = (
                    'Error creating room, '
                    'seems to have been created by peer meanwhile, retrying.'
                )
            else:
                msg = 'Error creating room, retrying.'
            self.log.debug(
                msg,
                room_name=room_name,
                error=error.content,
                error_code=error.code,
            )
        else:
            self.log.debug('Room created successfully', room=room, invitees=invitees)
            break
    else:
        # if can't join nor create, create an unnamed one
        room = self._client.create_room(
            None,
            invitees=invitees_uids,
            is_public=True,
        )
        self.log.warning(
            'Could not create nor join a named room. Successfully created an unnamed one',
            room=room,
            invitees=invitees,
        )

    return room
python
def wait(hotkey=None, suppress=False, trigger_on_release=False):
    """
    Blocks the program execution until the given hotkey is pressed or,
    if given no parameters, blocks forever.
    """
    if hotkey:
        lock = _Event()
        remove = add_hotkey(hotkey, lambda: lock.set(), suppress=suppress,
                            trigger_on_release=trigger_on_release)
        lock.wait()
        remove_hotkey(remove)
    else:
        while True:
            _time.sleep(1e6)
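This helper matches the wait() in the keyboard package; assuming that is the source, a typical call looks like:

import keyboard

keyboard.wait('ctrl+shift+q')   # blocks here until the hotkey fires
print('hotkey pressed')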
python
def readiterinit(d):
    """ Prepare to read data with ms.iter* """

    # set requested time range based on given parameters
    starttime_mjd = d['starttime_mjd']
    timeskip = d['inttime'] * d['nskip']
    starttime = qa.getvalue(qa.convert(qa.time(
        qa.quantity(starttime_mjd + timeskip / (24. * 60 * 60), 'd'),
        form=['ymd'], prec=9)[0], 's'))[0]
    # nints+1 to avoid the buffer running out and stalling iteration
    stoptime = qa.getvalue(qa.convert(qa.time(
        qa.quantity(starttime_mjd + (timeskip + (d['nints'] + 1) * d['inttime']) / (24. * 60 * 60), 'd'),
        form=['ymd'], prec=9)[0], 's'))[0]
    logger.debug('Time of first integration: %s',
                 qa.time(qa.quantity(starttime_mjd, 'd'), form=['ymd'], prec=9)[0])
    logger.info('Reading times %s to %s in %d iterations' % (
        qa.time(qa.quantity(starttime_mjd + timeskip / (24. * 60 * 60), 'd'),
                form=['hms'], prec=9)[0],
        qa.time(qa.quantity(starttime_mjd + (timeskip + (d['nints'] + 1) * d['inttime']) / (24. * 60 * 60), 'd'),
                form=['hms'], prec=9)[0],
        d['nthread']))

    # read data into data structure
    ms.open(d['filename'])
    if len(d['spwlist']) == 1:
        ms.selectinit(datadescid=d['spwlist'][0])
    else:
        ms.selectinit(datadescid=0, reset=True)    # reset includes spw in iteration over time

    selection = {'time': [starttime, stoptime],
                 'uvdist': [1., 1e10],
                 'antenna1': d['ants'],
                 'antenna2': d['ants']}    # exclude auto-corrs
    ms.select(items=selection)
    ms.selectpolarization(d['pols'])
    ms.iterinit(['TIME'], 0, d['iterint'] * d['nbl'] * d['nspw'] * d['npol'],
                adddefaultsortcolumns=False)
    iterstatus = ms.iterorigin()
python
def _run(self):
    """
    This is the logic to run it all. Heavily influenced by this post:
    http://nickdesaulniers.github.io/blog/2015/05/25/interpreter-compiler-jit/
    :return:
    """
    i = 0
    try:
        while i < len(self.program):
            if self.program[i] == ">":
                self._increment_pointer()
            elif self.program[i] == "<":
                self._decrement_pointer()
            elif self.program[i] == "+":
                self._increment_current_byte()
            elif self.program[i] == "-":
                self._decrement_current_byte()
            elif self.program[i] == ".":
                self._output_current_byte()
            elif self.program[i] == ",":
                self._read_byte()
            elif self.program[i] == "[":
                """
                if the byte at the data pointer is zero, then instead of moving the
                instruction pointer forward to the next command, jump it forward to
                the command after the matching ] command - Wikipedia
                """
                if self.tape[self.pointer] is None or self.tape[self.pointer] == 0:
                    loop = 1
                    while loop > 0:
                        i += 1
                        current_instruction = self.program[i]
                        if current_instruction == "]":
                            loop -= 1
                        elif current_instruction == "[":
                            loop += 1
            elif self.program[i] == "]":
                """
                if the byte at the data pointer is nonzero, then instead of moving the
                instruction pointer forward to the next command, jump it back to the
                command after the matching [ command. - Wikipedia
                """
                if self.tape[self.pointer] is not None and self.tape[self.pointer] > 0:
                    loop = 1
                    while loop > 0:
                        i -= 1
                        current_instruction = self.program[i]
                        if current_instruction == "[":
                            loop -= 1
                        elif current_instruction == "]":
                            loop += 1
            i += 1
    except PointerOutOfProgramRange as e:
        print(e.message)
        sys.exit(1)
    except IndexError:
        print("The program went out of bounds of its instructions")
        sys.exit(1)
java
@Override
public void close() throws IOException {
    synchronized (lock) {
        decoder = null;
        if (in != null) {
            in.close();
            in = null;
        }
    }
}
java
public static RawFrame createPL132(byte[] data, int offset) throws KNXFormatException {
    if (data.length - offset == 2)
        return new PL132Ack(data, offset);
    return new PL132LData(data, offset);
}
python
def splitPrefix(name):
    """
    Split the name into a tuple (I{prefix}, I{name}).  The first element in
    the tuple is I{None} when the name doesn't have a prefix.
    @param name: A node name containing an optional prefix.
    @type name: basestring
    @return: A tuple containing the (2) parts of I{name}
    @rtype: (I{prefix}, I{name})
    """
    if isinstance(name, str) and ':' in name:
        return tuple(name.split(':', 1))
    else:
        return (None, name)
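Both branches of splitPrefix, run directly:

print(splitPrefix('soap:Envelope'))  # -> ('soap', 'Envelope')
print(splitPrefix('Envelope'))       # -> (None, 'Envelope')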
java
public NotificationChain basicSetDuration(Parameter newDuration, NotificationChain msgs) {
    Parameter oldDuration = duration;
    duration = newDuration;
    if (eNotificationRequired()) {
        ENotificationImpl notification = new ENotificationImpl(this, Notification.SET,
                BpsimPackage.SCENARIO_PARAMETERS__DURATION, oldDuration, newDuration);
        if (msgs == null)
            msgs = notification;
        else
            msgs.add(notification);
    }
    return msgs;
}
java
@Override
public final String printNtz(final String pNumber, final String pDigSep,
        final String pDigGrSep, final Integer pDigitsInGroup) {
    if (pNumber == null || "".equals(pNumber)) {
        return "";
    }
    int dotIdx = pNumber.indexOf(".");
    String leftWing;
    String rightWing;
    if (dotIdx == -1) {
        leftWing = pNumber;
        rightWing = null;
    } else {
        leftWing = pNumber.substring(0, dotIdx);
        rightWing = pNumber.substring(dotIdx + 1);
    }
    StringBuffer sb = new StringBuffer();
    if (leftWing.startsWith("-")) {
        leftWing = leftWing.substring(1);
        sb.append("-");
    }
    addLeftWing(leftWing, sb, pDigGrSep, pDigitsInGroup);
    if (rightWing != null && rightWing.length() > 0) {
        // trim trailing zeros from the fractional part
        int end = rightWing.length();
        while (end > 0 && rightWing.charAt(end - 1) == '0') {
            end--;
        }
        if (end > 0) {
            sb.append(pDigSep);
            sb.append(rightWing, 0, end);
        }
    }
    return sb.toString();
}
java
public boolean insertBefore(final Node newNode, final Node oldNode) {
    if (oldNode == null || newNode == null) {
        return false;
    }

    int index = nodes.indexOf(oldNode);
    if (index != -1) {
        // Remove the parent from the new node if one exists
        if (newNode.getParent() != null) {
            newNode.removeParent();
        }
        newNode.setParent(this);

        // Add the node to the relevant list
        if (newNode instanceof Level) {
            levels.add((Level) newNode);
        } else if (newNode instanceof SpecTopic) {
            topics.add((SpecTopic) newNode);
        }

        // Insert the node at oldNode's position so that newNode precedes it
        if (index == 0) {
            nodes.addFirst(newNode);
        } else {
            nodes.add(index, newNode);
        }
        return true;
    } else {
        return false;
    }
}
python
def from_json(cls, json_doc):
    """Parse a JSON string and build an entity."""
    try:
        d = json.load(json_doc)
    except AttributeError:  # catch the read() error
        d = json.loads(json_doc)

    return cls.from_dict(d)
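A sketch of the two accepted inputs; MyEntity is a hypothetical class exposing this method as a classmethod alongside a from_dict:

import io

entity = MyEntity.from_json('{"id": 1}')               # plain string: load() raises AttributeError, falls back to loads()
entity = MyEntity.from_json(io.StringIO('{"id": 1}'))  # file-like object: json.load() path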
java
public Request<UserBlocks> get(String userId) {
    Asserts.assertNotNull(userId, "user id");

    String url = baseUrl
            .newBuilder()
            .addPathSegments("api/v2/user-blocks")
            .addPathSegment(userId)
            .build()
            .toString();
    CustomRequest<UserBlocks> request = new CustomRequest<>(client, url, "GET",
            new TypeReference<UserBlocks>() {
            });
    request.addHeader("Authorization", "Bearer " + apiToken);
    return request;
}
python
def get_all_derivatives(self, address):
    """Get all targets derived directly or indirectly from the specified target.

    Note that the specified target itself is not returned.

    :API: public
    """
    ret = []
    direct = self.get_direct_derivatives(address)
    ret.extend(direct)
    for t in direct:
        ret.extend(self.get_all_derivatives(t.address))
    return ret
python
def to_table_data(self):
    """
    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """
    self._validate_source_data()

    for table_key, json_records in six.iteritems(self._buffer):
        headers = sorted(six.viewkeys(json_records))

        self._loader.inc_table_count()
        self._table_key = table_key

        yield TableData(
            self._make_table_name(),
            headers,
            zip(*[json_records.get(header) for header in headers]),
            dp_extractor=self._loader.dp_extractor,
            type_hints=self._extract_type_hints(headers),
        )
java
public HllUpdate addAllBinary(Collection<BinaryValue> elements) {
    if (elements == null) {
        throw new IllegalArgumentException("Elements cannot be null");
    }

    for (BinaryValue element : elements) {
        this.adds.add(element);
    }
    return this;
}
python
def corr(dataset, column, method="pearson"):
    """
    Compute the correlation matrix with specified method using dataset.

    :param dataset:
        A Dataset or a DataFrame.
    :param column:
        The name of the column of vectors for which the correlation
        coefficient needs to be computed. This must be a column of the
        dataset, and it must contain Vector objects.
    :param method:
        String specifying the method to use for computing correlation.
        Supported: `pearson` (default), `spearman`.
    :return:
        A DataFrame that contains the correlation matrix of the column
        of vectors. This DataFrame contains a single row and a single
        column of name '$METHODNAME($COLUMN)'.

    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.stat import Correlation
    >>> dataset = [[Vectors.dense([1, 0, 0, -2])],
    ...            [Vectors.dense([4, 5, 0, 3])],
    ...            [Vectors.dense([6, 7, 0, 8])],
    ...            [Vectors.dense([9, 0, 0, 1])]]
    >>> dataset = spark.createDataFrame(dataset, ['features'])
    >>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
    >>> print(str(pearsonCorr).replace('nan', 'NaN'))
    DenseMatrix([[ 1.        ,  0.0556...,         NaN,  0.4004...],
                 [ 0.0556...,  1.        ,         NaN,  0.9135...],
                 [        NaN,         NaN,  1.        ,         NaN],
                 [ 0.4004...,  0.9135...,         NaN,  1.        ]])
    >>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
    >>> print(str(spearmanCorr).replace('nan', 'NaN'))
    DenseMatrix([[ 1.        ,  0.1054...,         NaN,  0.4       ],
                 [ 0.1054...,  1.        ,         NaN,  0.9486...],
                 [        NaN,         NaN,  1.        ,         NaN],
                 [ 0.4       ,  0.9486...,         NaN,  1.        ]])
    """
    sc = SparkContext._active_spark_context
    javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
    args = [_py2java(sc, arg) for arg in (dataset, column, method)]
    return _java2py(sc, javaCorrObj.corr(*args))
java
public static String readString(ChannelBuffer buffer, int length) {
    return readString(buffer, length, CharsetUtil.UTF_8);
    // char[] chars = new char[length];
    // for (int i = 0; i < length; i++)
    // {
    //     chars[i] = buffer.readChar();
    // }
    // return new String(chars);
}
python
def add_srec(self, records, overwrite=False):
    """Add given Motorola S-Records string. Set `overwrite` to ``True`` to
    allow already added data to be overwritten.
    """
    for record in StringIO(records):
        type_, address, size, data = unpack_srec(record.strip())

        if type_ == '0':
            self._header = data
        elif type_ in '123':
            address *= self.word_size_bytes
            self._segments.add(
                _Segment(address, address + size,
                         bytearray(data), self.word_size_bytes),
                overwrite)
        elif type_ in '789':
            self.execution_start_address = address
java
private void addFirstOfType() {
    for (Node node : nodes) {
        Index index = helper.getIndexInParent(node, true);
        if (index.index == 0)
            result.add(node);
    }
}
java
public boolean isTrue(Configuration configuration) {
    for (int i = 0; i < this.variables.size(); ++i) {
        Variable variable = this.variables.get(i);
        if (configuration.valueAt(variable.getIndex()) == 1 && !this.negatedFlags.get(i))
            return true;
        if (configuration.valueAt(variable.getIndex()) == 0 && this.negatedFlags.get(i))
            return true;
    }
    return false;
}
java
public Collection<Tenant> getTenants() {
    checkServiceState();
    Map<String, TenantDefinition> tenantMap = getAllTenantDefs();
    List<Tenant> result = new ArrayList<>();
    for (String tenantName : tenantMap.keySet()) {
        result.add(new Tenant(tenantMap.get(tenantName)));
    }
    return result;
}
java
public List<String> getFiles(GerritQueryHandler gerritQueryHandler) {
    if (files == null) {
        files = FileHelper.getFilesByChange(gerritQueryHandler, id);
    }
    return files;
}
java
public void savePCAToFile(String PCAFileName) throws Exception {
    if (!isPcaInitialized) {
        throw new Exception("Cannot save, PCA is not initialized!");
    }
    if (V_t == null) {
        throw new Exception("Cannot save to file, PCA matrix is null!");
    }
    BufferedWriter out = new BufferedWriter(new FileWriter(PCAFileName));
    // the first line of the file contains the training sample means per component
    for (int i = 0; i < sampleSize - 1; i++) {
        out.write(means.get(i) + " ");
    }
    out.write(means.get(sampleSize - 1) + "\n");
    // the second line of the file contains the eigenvalues in descending order
    for (int i = 0; i < numComponents - 1; i++) {
        out.write(W.get(i, i) + " ");
    }
    out.write(W.get(numComponents - 1, numComponents - 1) + "\n");
    // the next lines of the file contain the eigenvectors in descending eigenvalue order
    for (int i = 0; i < numComponents; i++) {
        for (int j = 0; j < sampleSize - 1; j++) {
            out.write(V_t.get(i, j) + " ");
        }
        out.write(V_t.get(i, sampleSize - 1) + "\n");
    }
    out.close();
}
python
def _children(self):
    """Yield all direct children of this object."""
    for codeobj in self.parameters:
        yield codeobj
    for codeobj in self.body._children():
        yield codeobj
python
def _use_inf_as_na(key):
    """Option change callback for na/inf behaviour

    Choose which replacement for numpy.isnan / -numpy.isfinite is used.

    Parameters
    ----------
    flag: bool
        True means treat None, NaN, INF, -INF as null (old way),
        False means None and NaN are null, but INF, -INF are not null
        (new way).

    Notes
    -----
    This approach to setting global module values is discussed and
    approved here:

    * http://stackoverflow.com/questions/4859217/
      programmatically-creating-variables-in-python/4859312#4859312
    """
    from pandas._config import get_option
    flag = get_option(key)
    if flag:
        globals()['_isna'] = _isna_old
    else:
        globals()['_isna'] = _isna_new
java
public void zone_zoneName_redirection_id_PUT(String zoneName, Long id, OvhRedirection body) throws IOException {
    String qPath = "/domain/zone/{zoneName}/redirection/{id}";
    StringBuilder sb = path(qPath, zoneName, id);
    exec(qPath, "PUT", sb.toString(), body);
}
java
public static Complex_F64[] findRoots(double... coefficients) {
    int N = coefficients.length - 1;

    // Construct the companion matrix
    DMatrixRMaj c = new DMatrixRMaj(N, N);

    double a = coefficients[N];
    for (int i = 0; i < N; i++) {
        c.set(i, N - 1, -coefficients[i] / a);
    }
    for (int i = 1; i < N; i++) {
        c.set(i, i - 1, 1);
    }

    // use generalized eigenvalue decomposition to find the roots
    EigenDecomposition_F64<DMatrixRMaj> evd = DecompositionFactory_DDRM.eig(N, false);
    evd.decompose(c);

    Complex_F64[] roots = new Complex_F64[N];
    for (int i = 0; i < N; i++) {
        roots[i] = evd.getEigenvalue(i);
    }

    return roots;
}
python
def upload_tree(self, src, dst, ignore=None):
    """Recursively upload a directory tree.

    Although similar to shutil.copytree we don't follow symlinks.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    try:
        self.conn.mkd(dst)
    except error_perm:
        pass

    errors = []
    for name in names:
        if name in ignored_names:
            continue
        src_name = os.path.join(src, name)
        dst_name = os.path.join(dst, name)
        try:
            if os.path.islink(src_name):
                pass
            elif os.path.isdir(src_name):
                self.upload_tree(src_name, dst_name, ignore)
            else:
                # Will raise a SpecialFileError for unsupported file types
                self.put(src_name, dst_name)
        except Exception as why:
            errors.append((src_name, dst_name, str(why)))

    return dst
java
public static File findInstanceResourcesDirectory(File applicationFilesDirectory, Component component) {
    File root = new File(applicationFilesDirectory, Constants.PROJECT_DIR_GRAPH);
    File result = new File("No recipe directory.");

    Set<Component> alreadyChecked = new HashSet<>();
    for (Component c = component; c != null; c = c.getExtendedComponent()) {

        // Prevent infinite loops for exotic cases
        if (alreadyChecked.contains(c))
            break;

        alreadyChecked.add(c);
        if ((result = new File(root, c.getName())).exists())
            break;
    }

    return result;
}
java
public void addCache(String cacheName, AsyncLoadingCache cache) {
    children.put(cacheName, cache.synchronous());
}
java
public static base_responses update(nitro_service client, nsacl resources[]) throws Exception {
    base_responses result = null;
    if (resources != null && resources.length > 0) {
        nsacl updateresources[] = new nsacl[resources.length];
        for (int i = 0; i < resources.length; i++) {
            updateresources[i] = new nsacl();
            updateresources[i].aclname = resources[i].aclname;
            updateresources[i].aclaction = resources[i].aclaction;
            updateresources[i].srcip = resources[i].srcip;
            updateresources[i].srcipop = resources[i].srcipop;
            updateresources[i].srcipval = resources[i].srcipval;
            updateresources[i].srcport = resources[i].srcport;
            updateresources[i].srcportop = resources[i].srcportop;
            updateresources[i].srcportval = resources[i].srcportval;
            updateresources[i].destip = resources[i].destip;
            updateresources[i].destipop = resources[i].destipop;
            updateresources[i].destipval = resources[i].destipval;
            updateresources[i].destport = resources[i].destport;
            updateresources[i].destportop = resources[i].destportop;
            updateresources[i].destportval = resources[i].destportval;
            updateresources[i].srcmac = resources[i].srcmac;
            updateresources[i].protocol = resources[i].protocol;
            updateresources[i].protocolnumber = resources[i].protocolnumber;
            updateresources[i].icmptype = resources[i].icmptype;
            updateresources[i].icmpcode = resources[i].icmpcode;
            updateresources[i].vlan = resources[i].vlan;
            updateresources[i].Interface = resources[i].Interface;
            updateresources[i].priority = resources[i].priority;
            updateresources[i].logstate = resources[i].logstate;
            updateresources[i].ratelimit = resources[i].ratelimit;
            updateresources[i].established = resources[i].established;
        }
        result = update_bulk_request(client, updateresources);
    }
    return result;
}
java
private void launchWithConfigurationString(
        final String evaluatorConfiguration,
        final String contextConfiguration,
        final Optional<String> serviceConfiguration,
        final Optional<String> taskConfiguration) {
    try (final LoggingScope lb = loggingScopeFactory.evaluatorLaunch(this.getId())) {
        final Configuration submissionEvaluatorConfiguration =
            makeEvaluatorConfiguration(
                contextConfiguration,
                Optional.ofNullable(evaluatorConfiguration),
                serviceConfiguration,
                taskConfiguration);
        resourceBuildAndLaunch(submissionEvaluatorConfiguration);
    }
}
python
def _process_transfer(self, ud, ase, offsets, data):
    # type: (Uploader, blobxfer.models.upload.Descriptor,
    #        blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.upload.Offsets, bytes) -> None
    """Process transfer instructions
    :param Uploader self: this
    :param blobxfer.models.upload.Descriptor ud: upload descriptor
    :param blobxfer.models.azure.StorageEntity ase: Storage entity
    :param blobxfer.models.upload.Offsets offsets: offsets
    :param bytes data: data to upload
    """
    # issue put range
    self._put_data(ud, ase, offsets, data)
    # accounting
    with self._transfer_lock:
        if ud.local_path.use_stdin:
            self._upload_bytes_total += offsets.num_bytes
        elif offsets.chunk_num == 0:
            self._upload_bytes_total += ase.size
        self._upload_bytes_sofar += offsets.num_bytes
        self._transfer_set.remove(
            blobxfer.operations.upload.Uploader.create_unique_transfer_id(
                ud.local_path, ase, offsets))
    ud.complete_offset_upload(offsets.chunk_num)
    # add descriptor back to upload queue only for append blobs
    if ud.entity.mode == blobxfer.models.azure.StorageModes.Append:
        self._upload_queue.put(ud)
    # update progress bar
    self._update_progress_bar(stdin=ud.local_path.use_stdin)
java
public CollectionInfo getCollectionInfo(int collectionId, String language) throws MovieDbException {
    TmdbParameters parameters = new TmdbParameters();
    parameters.add(Param.ID, collectionId);
    parameters.add(Param.LANGUAGE, language);

    URL url = new ApiUrl(apiKey, MethodBase.COLLECTION).buildUrl(parameters);
    String webpage = httpTools.getRequest(url);

    try {
        return MAPPER.readValue(webpage, CollectionInfo.class);
    } catch (IOException ex) {
        throw new MovieDbException(ApiExceptionType.MAPPING_FAILED, "Failed to get collection information", url, ex);
    }
}
python
def unlock(self, passphrase, encrypted_seed=None):
    """Unlock the Wallet by decrypting the primary_private_seed with the
    supplied passphrase. Once unlocked, the private seed is accessible in
    memory and calls to `account.pay` will succeed. This is a necessary
    step for creating transactions.

    Args:
        passphrase (str): The passphrase the User used to encrypt this
            wallet.
        encrypted_seed (dict): A dictionary of the form
            {'ciphertext': longhexvalue,
             'iterations': integer of pbkdf2 derivations,
             'nonce': 24-byte hex value,
             'salt': 16-byte hex value}
            This dict represents a private seed (not a master key)
            encrypted with the `passphrase` using pbkdf2. You can obtain
            this value with wallet.generate. If this value is supplied, it
            overwrites (locally only) the encrypted primary_private_seed
            value, allowing you to load in a primary key that you didn't
            store with Gem. Note that the key MUST match the pubkey that
            this wallet was created with.

    Returns:
        self
    """
    wallet = self.resource
    if not encrypted_seed:
        encrypted_seed = wallet.primary_private_seed
    try:
        if encrypted_seed['nonce']:
            primary_seed = NaclPassphraseBox.decrypt(
                passphrase, encrypted_seed)
        else:
            primary_seed = PassphraseBox.decrypt(
                passphrase, encrypted_seed)
    except Exception:
        raise InvalidPassphraseError()

    self.multi_wallet = MultiWallet(
        private_seeds={'primary': primary_seed},
        public={'cosigner': wallet.cosigner_public_seed,
                'backup': wallet.backup_public_seed})

    return self
java
public static String cleanSubOptions(String optionPrefix, Set<String> allowedSubOptions, String s) {
    StringBuilder sb = new StringBuilder();
    if (!s.startsWith(optionPrefix)) return "";
    StringTokenizer st = new StringTokenizer(s.substring(optionPrefix.length()), ",");
    while (st.hasMoreTokens()) {
        String o = st.nextToken();
        int p = o.indexOf('=');
        if (p > 0) {
            String key = o.substring(0, p);
            String val = o.substring(p + 1);
            if (allowedSubOptions.contains(key)) {
                if (sb.length() > 0) sb.append(',');
                sb.append(key + "=" + val);
            }
        }
    }
    return sb.toString();
}
java
protected final <T extends ChronoEntity<T>> UnitRule<T> derive(T entity) {
    return this.derive(entity.getChronology());
}
python
def value_from_datadict(self, data, files, name):
    """Ensure the payload is a list of values.

    In the case of a sub form, we need to ensure the data is returned as a
    list and not a dictionary. When a dict is found in the given data, we
    need to ensure the data is converted to a list preserving the field
    order.
    """
    if name in data:
        payload = data.get(name)
        if isinstance(payload, (dict,)):
            # Make sure we get the data in the correct order
            return [payload.get(f.name) for f in self.fields]
        return payload
    return super(FormFieldWidget, self).value_from_datadict(data, files, name)
python
def register_suffixes(suffixes, type):
    """ Specifies that targets with suffix from 'suffixes' have the type 'type'.
        If a different type is already specified for any of the suffixes,
        issues an error.
    """
    assert is_iterable_typed(suffixes, basestring)
    assert isinstance(type, basestring)
    for s in suffixes:
        if s in __suffixes_to_types:
            old_type = __suffixes_to_types[s]
            if old_type != type:
                raise BaseException('Attempting to specify type for suffix "%s"\nOld type: "%s", New type "%s"' % (s, old_type, type))
        else:
            __suffixes_to_types[s] = type
java
public Paint getTopShadowGradient(Shape s) {
    Rectangle2D bounds = s.getBounds2D();
    float minY = (float) bounds.getMinY();
    float maxY = (float) bounds.getMaxY();
    float midX = (float) bounds.getCenterX();
    return new LinearGradientPaint(midX, minY, midX, maxY,
            new float[] { 0f, 1f },
            new Color[] { innerShadow.top, transparentColor });
}
python
def resize(self, size, interp='nearest'):
    """Resize the image.

    Parameters
    ----------
    size : int, float, or tuple
        * int   - Percentage of current size.
        * float - Fraction of current size.
        * tuple - Size of the output image.

    interp : :obj:`str`, optional
        Interpolation to use for re-sizing ('nearest', 'lanczos',
        'bilinear', 'bicubic', or 'cubic')
    """
    resized_data = sm.imresize(self.data, size, interp=interp, mode='L')
    return SegmentationImage(resized_data, self._frame)
python
def get_ed25519ll():
    """Lazy import-and-test of ed25519 module"""
    global ed25519ll

    if not ed25519ll:
        try:
            import ed25519ll  # fast (thousands / s)
        except (ImportError, OSError):  # pragma nocover
            from . import ed25519py as ed25519ll  # pure Python (hundreds / s)
        test()

    return ed25519ll
python
def bencode(canonical):
    '''
    Turns a dictionary into a bencoded str with alphabetized keys
    e.g., {'spam': 'eggs', 'cow': 'moo'} --> d3:cow3:moo4:spam4:eggse
    '''
    in_dict = dict(canonical)

    def encode_str(in_str):
        out_str = str(len(in_str)) + ':' + in_str
        return out_str

    def encode_int(in_int):
        out_str = str('i' + str(in_int) + 'e')
        return out_str

    def encode_list(in_list):
        out_str = 'l'
        for item in in_list:
            out_str += encode_item(item)
        out_str += 'e'
        return out_str

    def encode_dict(in_dict):
        out_str = 'd'
        keys = sorted(in_dict.keys())
        for key in keys:
            val = in_dict[key]
            out_str = out_str + encode_item(key) + encode_item(val)
        out_str += 'e'
        return out_str

    def encode_item(x):
        if isinstance(x, str):
            return encode_str(x)
        elif isinstance(x, int):
            return encode_int(x)
        elif isinstance(x, list):
            return encode_list(x)
        elif isinstance(x, dict):
            return encode_dict(x)

    return encode_item(in_dict)
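The docstring's own example, run directly:

print(bencode({'spam': 'eggs', 'cow': 'moo'}))  # -> d3:cow3:moo4:spam4:eggse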
java
@Override
public CPDefinitionInventory findByCPDefinitionId(long CPDefinitionId)
        throws NoSuchCPDefinitionInventoryException {
    CPDefinitionInventory cpDefinitionInventory = fetchByCPDefinitionId(CPDefinitionId);

    if (cpDefinitionInventory == null) {
        StringBundler msg = new StringBundler(4);

        msg.append(_NO_SUCH_ENTITY_WITH_KEY);
        msg.append("CPDefinitionId=");
        msg.append(CPDefinitionId);
        msg.append("}");

        if (_log.isDebugEnabled()) {
            _log.debug(msg.toString());
        }

        throw new NoSuchCPDefinitionInventoryException(msg.toString());
    }

    return cpDefinitionInventory;
}
java
private GrammarInfo[] scanForGrammars() throws MojoExecutionException {
    if (!getSourceDirectory().isDirectory()) {
        return null;
    }

    GrammarInfo[] grammarInfos;

    getLog().debug("Scanning for grammars: " + getSourceDirectory());
    try {
        final GrammarDirectoryScanner scanner = new GrammarDirectoryScanner();
        scanner.setSourceDirectory(getSourceDirectory());
        scanner.setIncludes(getIncludes());
        scanner.setExcludes(getExcludes());
        scanner.setOutputDirectory(getOutputDirectory());
        scanner.setParserPackage(getParserPackage());
        scanner.setStaleMillis(getStaleMillis());
        scanner.scan();
        grammarInfos = scanner.getIncludedGrammars();
    } catch (final Exception e) {
        throw new MojoExecutionException("Failed to scan for grammars: " + getSourceDirectory(), e);
    }
    getLog().debug("Found grammars: " + Arrays.asList(grammarInfos));

    return grammarInfos;
}
python
def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
              predictionCol="prediction", numPartitions=None):
    """
    setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
              predictionCol="prediction", numPartitions=None)
    """
    kwargs = self._input_kwargs
    return self._set(**kwargs)
python
def url_to_path(self, url):
    """Convert schema URL to path.

    :param url: The schema URL.
    :returns: The schema path or ``None`` if the schema can't be resolved.
    """
    parts = urlsplit(url)
    try:
        loader, args = self.url_map.bind(parts.netloc).match(parts.path)
        path = args.get('path')
        if loader == 'schema' and path in self.schemas:
            return path
    except HTTPException:
        return None
java
private void deleteStaleBackups() throws Exception {
    UfsStatus[] statuses = mUfs.listStatus(mBackupDir);
    if (statuses.length <= mRetainedFiles) {
        return;
    }

    // Sort the backup files according to create time from oldest to newest
    TreeMap<Instant, String> timeToFile = new TreeMap<>((a, b) -> (
        a.isBefore(b) ? -1 : a.isAfter(b) ? 1 : 0));
    for (UfsStatus status : statuses) {
        if (status.isFile()) {
            Matcher matcher = BackupManager.BACKUP_FILE_PATTERN.matcher(status.getName());
            if (matcher.matches()) {
                timeToFile.put(Instant.ofEpochMilli(Long.parseLong(matcher.group(1))),
                        status.getName());
            }
        }
    }

    int toDeleteFileNum = timeToFile.size() - mRetainedFiles;
    if (toDeleteFileNum <= 0) {
        return;
    }
    for (int i = 0; i < toDeleteFileNum; i++) {
        String toDeleteFile = PathUtils.concatPath(mBackupDir,
                timeToFile.pollFirstEntry().getValue());
        mUfs.deleteExistingFile(toDeleteFile);
    }
    LOG.info("Deleted {} stale metadata backup files at {}", toDeleteFileNum, mBackupDir);
}
python
def active_aliases(self):
    """Get a filtered list of aliases based on configuration.

    Returns aliases and their mappings that are defined in the
    `SEARCH_MAPPINGS` config variable. If the `SEARCH_MAPPINGS` is set to
    `None` (the default), all aliases are included.
    """
    whitelisted_aliases = self.app.config.get('SEARCH_MAPPINGS')
    if whitelisted_aliases is None:
        return self.aliases
    else:
        return {k: v for k, v in self.aliases.items()
                if k in whitelisted_aliases}
java
private Animator preparePressedAnimation() {
    Animator animation = ObjectAnimator.ofFloat(drawable,
            CircularProgressDrawable.CIRCLE_SCALE_PROPERTY,
            drawable.getCircleScale(), 0.65f);
    animation.setDuration(120);
    return animation;
}
python
def fit(self, X, y=None, **kwargs):
    """
    Fits the estimator to discover the feature importances described by
    the data, then draws those importances as a bar plot.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    kwargs : dict
        Keyword arguments passed to the fit method of the estimator.

    Returns
    -------
    self : visualizer
        The fit method must always return self to support pipelines.
    """
    super(FeatureImportances, self).fit(X, y, **kwargs)

    # Get the feature importances from the model
    self.feature_importances_ = self._find_importances_param()

    # Get the classes from the model
    if is_classifier(self):
        self.classes_ = self._find_classes_param()
    else:
        self.classes_ = None
        self.stack = False

    # If self.stack = True and feature importances is a multidim array,
    # we're expecting a shape of (n_classes, n_features)
    # therefore we flatten by taking the average by
    # column to get shape (n_features,)  (see LogisticRegression)
    if not self.stack and self.feature_importances_.ndim > 1:
        self.feature_importances_ = np.mean(self.feature_importances_, axis=0)
        warnings.warn((
            "detected multi-dimensional feature importances but stack=False, "
            "using mean to aggregate them."
        ), YellowbrickWarning)

    # Apply absolute value filter before normalization
    if self.absolute:
        self.feature_importances_ = np.abs(self.feature_importances_)

    # Normalize features relative to the maximum
    if self.relative:
        maxv = np.abs(self.feature_importances_).max()
        self.feature_importances_ /= maxv
        self.feature_importances_ *= 100.0

    # Create labels for the feature importances
    # NOTE: this code is duplicated from MultiFeatureVisualizer
    if self.labels is None:
        # Use column names if a dataframe
        if is_dataframe(X):
            self.features_ = np.array(X.columns)
        # Otherwise use the column index as the labels
        else:
            _, ncols = X.shape
            self.features_ = np.arange(0, ncols)
    else:
        self.features_ = np.array(self.labels)

    # Sort the features and their importances
    if self.stack:
        sort_idx = np.argsort(np.mean(self.feature_importances_, 0))
        self.features_ = self.features_[sort_idx]
        self.feature_importances_ = self.feature_importances_[:, sort_idx]
    else:
        sort_idx = np.argsort(self.feature_importances_)
        self.features_ = self.features_[sort_idx]
        self.feature_importances_ = self.feature_importances_[sort_idx]

    # Draw the feature importances
    self.draw()
    return self
python
def delete_user(self, user_descriptor):
    """DeleteUser.

    [Preview API] Disables a user.

    :param str user_descriptor: The descriptor of the user to delete.
    """
    route_values = {}
    if user_descriptor is not None:
        route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str')
    self._send(http_method='DELETE',
               location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
               version='5.1-preview.1',
               route_values=route_values)
java
@Override
public GetPasswordDataResult getPasswordData(GetPasswordDataRequest request) {
    request = beforeClientExecution(request);
    return executeGetPasswordData(request);
}
python
def stop(self):
    """
    Stop services and requesters and then connection.
    :return: self
    """
    LOGGER.debug("rabbitmq.Driver.stop")
    for requester in self.requester_registry:
        requester.stop()
    self.requester_registry.clear()

    for service in self.services_registry:
        if service.is_started:
            service.stop()
    self.services_registry.clear()

    pykka.ActorRegistry.stop_all()

    return self
java
public void marshall(FieldStats fieldStats, ProtocolMarshaller protocolMarshaller) {
    if (fieldStats == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(fieldStats.getMin(), MIN_BINDING);
        protocolMarshaller.marshall(fieldStats.getMax(), MAX_BINDING);
        protocolMarshaller.marshall(fieldStats.getCount(), COUNT_BINDING);
        protocolMarshaller.marshall(fieldStats.getMissing(), MISSING_BINDING);
        protocolMarshaller.marshall(fieldStats.getSum(), SUM_BINDING);
        protocolMarshaller.marshall(fieldStats.getSumOfSquares(), SUMOFSQUARES_BINDING);
        protocolMarshaller.marshall(fieldStats.getMean(), MEAN_BINDING);
        protocolMarshaller.marshall(fieldStats.getStddev(), STDDEV_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def attention_lm_moe_base_ae():
    """Base model with attention expert."""
    hparams = attention_lm_moe_base_long_seq()
    hparams.attention_type = AttentionType.LOCAL_EXPERTS

    hparams.learning_rate = 0.05
    hparams.learning_rate_warmup_steps = 10000

    # According to noam, ("n", "da") seems better for harder-to-learn models
    # hparams.layer_preprocess_sequence = "n"
    # hparams.layer_postprocess_sequence = "da"
    return hparams
python
def cls_register(cls, frameset, new_class, init_args, name=None):
    """ Register a new FrameSet or FrameSet subclass as a member/attribute
        of a class.
        Returns the new FrameSet or FrameSet subclass.

        Arguments:
            frameset   : An existing FrameSet, or an iterable of strings.
            init_args  : A list of properties from the `frameset` to try to
                         use for initializing the new FrameSet.
            new_class  : The class type to initialize.
            name       : New name for the FrameSet, also used as the class's
                         attribute name.
                         If the `frameset` object has no name attribute,
                         this argument is required. It must not be empty
                         when given.
    """
    name = name or getattr(frameset, 'name', None)
    if name is None:
        raise ValueError(
            '`name` is needed when the `frameset` has no name attribute.'
        )
    kwargs = {'name': name}
    for initarg in init_args:
        kwargs[initarg] = getattr(frameset, initarg, None)

    newframeset = new_class(frameset, **kwargs)
    # Mark this FrameSet/BarSet as a registered item (not basic/original).
    newframeset._registered = True
    setattr(cls, name, newframeset)
    return newframeset
python
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re

    for name in filenames:
        base = os.path.basename(name)
        f = open(name, 'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()

    data = ["    %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)

    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb')
    src = f.read()
    f.close()

    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)

    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile, 'w')
    f.write(src)
    f.close()
java
public JodaBeanSer withShortTypes(boolean shortTypes) {
    return new JodaBeanSer(indent, newLine, converter, iteratorFactory, shortTypes,
            deserializers, includeDerived);
}
java
public java.util.List<String> getSpotFleetRequestIds() {
    if (spotFleetRequestIds == null) {
        spotFleetRequestIds = new com.amazonaws.internal.SdkInternalList<String>();
    }
    return spotFleetRequestIds;
}
java
public final void mINSERT() throws RecognitionException {
    try {
        int _type = INSERT;
        int _channel = DEFAULT_TOKEN_CHANNEL;
        // hql.g:36:8: ( 'insert' )
        // hql.g:36:10: 'insert'
        {
            match("insert");
            if (state.failed) return;
        }
        state.type = _type;
        state.channel = _channel;
    } finally {
        // do for sure before leaving
    }
}
java
public double distanceFrom(LatLong end) {
    double dLat = (end.getLatitude() - getLatitude()) * Math.PI / 180;
    double dLon = (end.getLongitude() - getLongitude()) * Math.PI / 180;
    double a = Math.sin(dLat / 2) * Math.sin(dLat / 2)
            + Math.cos(getLatitude() * Math.PI / 180)
            * Math.cos(end.getLatitude() * Math.PI / 180)
            * Math.sin(dLon / 2) * Math.sin(dLon / 2);
    double c = 2.0 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a));
    double d = EarthRadiusMeters * c;
    return d;
}
java
@ObjectiveCName("forwardContentContentWithPeer:withContent:")
public void forwardContent(Peer peer, AbsContent content) {
    modules.getMessagesModule().forwardContent(peer, content);
}
python
def _has_message(self):
    """ Whether or not we have messages available for processing. """
    sep = protocol.MINIMAL_LINE_SEPARATOR.encode(self.encoding)
    return sep in self._receive_buffer
java
public void setGroupMembers(List<String> membershipInfo) {
    for (String line : membershipInfo) {
        String[] fields = line.split("\\|");
        if (fields.length < 2) {
            continue;
        }
        Set<String> users = StringUtils.commaDelimitedListToSet(fields[1]);
        String groupName = fields[0];
        groups.putIfAbsent(groupName, null);
        boolean groupAdmin = (3 <= fields.length && "write".equalsIgnoreCase(fields[2]));
        if (groupAdmin) {
            groupAdmins.put(groupName, users);
        } else {
            groupMembers.put(groupName, users);
        }
    }
    logger.debug("groups: " + groups);
    logger.debug("admins: " + groupAdmins + ", members: " + groupMembers);
}
python
def set_hostname(self, value=None, default=False, disable=False):
    """Configures the global system hostname setting

    EosVersion:
        4.13.7M

    Args:
        value (str): The hostname value
        default (bool): Controls use of the default keyword
        disable (bool): Controls the use of the no keyword

    Returns:
        bool: True if the commands are completed successfully
    """
    cmd = self.command_builder('hostname', value=value, default=default,
                               disable=disable)
    return self.configure(cmd)
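This looks like pyeapi's System API; assuming so, a usage sketch (the connection profile and hostname below are made up):

import pyeapi

node = pyeapi.connect_to('veos01')            # profile defined in eapi.conf
node.api('system').set_hostname('core-sw1')   # returns True on success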
java
private List<Partition> createUserSpecifiedPartitions() {
    List<Partition> partitions = new ArrayList<>();

    List<String> watermarkPoints = state.getPropAsList(USER_SPECIFIED_PARTITIONS);
    boolean isEarlyStopped = state.getPropAsBoolean(IS_EARLY_STOPPED);
    if (watermarkPoints == null || watermarkPoints.size() == 0) {
        LOG.info("There should be some partition points");
        long defaultWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
        partitions.add(new Partition(defaultWatermark, defaultWatermark, true, true));
        return partitions;
    }

    WatermarkType watermarkType = WatermarkType.valueOf(
        state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE,
            ConfigurationKeys.DEFAULT_WATERMARK_TYPE).toUpperCase());

    long lowWatermark = adjustWatermark(watermarkPoints.get(0), watermarkType);
    long highWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;

    // Only one partition point specified
    if (watermarkPoints.size() == 1) {
        if (watermarkType != WatermarkType.SIMPLE) {
            String timeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE);
            String currentTime = Utils.dateTimeToString(getCurrentTime(timeZone), WATERMARKTIMEFORMAT, timeZone);
            highWatermark = adjustWatermark(currentTime, watermarkType);
        }
        partitions.add(new Partition(lowWatermark, highWatermark, true, false));
        return partitions;
    }

    int i;
    for (i = 1; i < watermarkPoints.size() - 1; i++) {
        highWatermark = adjustWatermark(watermarkPoints.get(i), watermarkType);
        partitions.add(new Partition(lowWatermark, highWatermark, true));
        lowWatermark = highWatermark;
    }

    // Last partition
    highWatermark = adjustWatermark(watermarkPoints.get(i), watermarkType);
    ExtractType extractType = ExtractType.valueOf(
        this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_EXTRACT_TYPE).toUpperCase());

    // If it is early stop, we should not remove upper bounds
    if ((isFullDump() || isSnapshot(extractType)) && !isEarlyStopped) {
        // The upper bounds can be removed for last work unit
        partitions.add(new Partition(lowWatermark, highWatermark, true, false));
    } else {
        // The upper bounds can not be removed for last work unit
        partitions.add(new Partition(lowWatermark, highWatermark, true, true));
    }

    return partitions;
}
python
def isIsosceles(self):
    '''
    True iff two side lengths are equal, boolean.
    '''
    return (self.a == self.b) or (self.a == self.c) or (self.b == self.c)
python
def check_overscan(xstart, xsize, total_prescan_pixels=24,
                   total_science_pixels=4096):
    """Check image for bias columns.

    Parameters
    ----------
    xstart : int
        Starting column of the readout in detector coordinates.

    xsize : int
        Number of columns in the readout.

    total_prescan_pixels : int
        Total prescan pixels for a single amplifier on a detector.
        Default is 24 for WFC.

    total_science_pixels : int
        Total science pixels across a detector.
        Default is 4096 for WFC (across two amplifiers).

    Returns
    -------
    hasoverscan : bool
        Indication if there are bias columns in the image.

    leading : int
        Number of bias columns on the A/C amplifiers side of the CCDs
        ("TRIMX1" in ``OSCNTAB``).

    trailing : int
        Number of bias columns on the B/D amplifiers side of the CCDs
        ("TRIMX2" in ``OSCNTAB``).
    """
    hasoverscan = False
    leading = 0
    trailing = 0

    if xstart < total_prescan_pixels:
        hasoverscan = True
        leading = abs(xstart - total_prescan_pixels)

    if (xstart + xsize) > total_science_pixels:
        hasoverscan = True
        trailing = abs(total_science_pixels -
                       (xstart + xsize - total_prescan_pixels))

    return hasoverscan, leading, trailing
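Two quick calls showing how the leading/trailing counts fall out with the WFC defaults:

print(check_overscan(10, 4096))   # -> (True, 14, 14): starts 14 columns into the 24-pixel
                                  #    prescan, and 10 + 4096 - 24 runs 14 past the science area
print(check_overscan(0, 4144))    # -> (True, 24, 24): full 24-pixel bias strips on both sides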
java
static double rgba(double r, double g, double b, double a) {
    return Double.longBitsToDouble(
            Math.round(a * 0xFFFF) << 48
            | (colorLargeDigit(r) << 32)
            | (colorLargeDigit(g) << 16)
            | colorLargeDigit(b));
}
python
def role_show(endpoint_id, role_id):
    """
    Executor for `globus endpoint role show`
    """
    client = get_client()

    role = client.get_endpoint_role(endpoint_id, role_id)

    formatted_print(
        role,
        text_format=FORMAT_TEXT_RECORD,
        fields=(
            ("Principal Type", "principal_type"),
            ("Principal", lookup_principal),
            ("Role", "role"),
        ),
    )
java
@Override
public EClass getIfcHumidifier() {
    if (ifcHumidifierEClass == null) {
        ifcHumidifierEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI)
                .getEClassifiers().get(314);
    }
    return ifcHumidifierEClass;
}
python
def rename_edges(self, old_node_name, new_node_name):
    """ Change references to a node in existing edges.

    Args:
        old_node_name (str): The old name for the node.
        new_node_name (str): The new name for the node.
    """
    graph = self.graph
    # Snapshot the items so renaming keys doesn't mutate the dict mid-iteration
    for node, edges in list(graph.items()):
        if node == old_node_name:
            graph[new_node_name] = copy(edges)
            del graph[old_node_name]
        else:
            if old_node_name in edges:
                edges.remove(old_node_name)
                edges.add(new_node_name)
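A toy run; the DAG class below is a hypothetical stand-in whose graph is a dict mapping node name to a set of downstream names, matching what the method expects:

from copy import copy

class DAG:
    def __init__(self, graph):
        self.graph = graph
    rename_edges = rename_edges   # borrow the method above

dag = DAG({'a': {'b'}, 'b': set()})
dag.rename_edges('b', 'c')
print(dag.graph)   # -> {'a': {'c'}, 'c': set()}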
python
def log(self, level, msg):
    """
    Logs the given message with the given level.

    :param      level | <int> | logging level value
                msg   | <unicode>

    :return     <bool> success
    """
    if self.isDestroyed():
        return

    locker = QtCore.QMutexLocker(self._mutex)

    try:
        msg = projex.text.nativestring(msg)
        self.moveCursor(QtGui.QTextCursor.End)
        self.setCurrentMode(level)

        if self.textCursor().block().text():
            self.insertPlainText('\n')

        self.insertPlainText(msg.lstrip('\n\r'))
        self.scrollToEnd()
    except RuntimeError:
        return

    if not self.signalsBlocked():
        self.messageLogged.emit(level, msg)

    return True
python
def is_merge_origin(self):
    """True if cell is top-left in merged cell range."""
    if self.gridSpan > 1 and not self.vMerge:
        return True
    if self.rowSpan > 1 and not self.hMerge:
        return True
    return False
python
def columnType(self):
    """
    Returns the column type for this item based on the current column.

    :return     <orb.ColumnType>
    """
    schema = self.treeWidget().parent().schema()
    if not schema:
        return 0

    column = schema.column(self.text(0))
    if column:
        return column.columnType()
    return ColumnType.String
python
def resolve_label(self, label):
    """
    Resolves a label for this module only. If the label refers to another
    module, an exception is raised.

    @type  label: str
    @param label: Label to resolve.

    @rtype:  int
    @return: Memory address pointed to by the label.

    @raise ValueError: The label is malformed or impossible to resolve.
    @raise RuntimeError: Cannot resolve the module or function.
    """

    # Split the label into its components.
    # Use the fuzzy mode whenever possible.
    aProcess = self.get_process()
    if aProcess is not None:
        (module, procedure, offset) = aProcess.split_label(label)
    else:
        (module, procedure, offset) = _ModuleContainer.split_label(label)

    # If a module name is given that doesn't match ours,
    # raise an exception.
    if module and not self.match_name(module):
        raise RuntimeError("Label does not belong to this module")

    # Resolve the procedure if given.
    if procedure:
        address = self.resolve(procedure)
        if address is None:
            # If it's a debug symbol, use the symbol.
            address = self.resolve_symbol(procedure)
            # If it's the keyword "start" use the entry point.
            if address is None and procedure == "start":
                address = self.get_entry_point()
        # The procedure was not found.
        if address is None:
            if not module:
                module = self.get_name()
            msg = "Can't find procedure %s in module %s"
            raise RuntimeError(msg % (procedure, module))

    # If no procedure is given use the base address of the module.
    else:
        address = self.get_base()

    # Add the offset if given and return the resolved address.
    if offset:
        address = address + offset
    return address
java
private boolean matches7000(ApiDifference apiDiff) {
    throwIfMissing(false, true, false, false);
    return SelectorUtils.matchPath(method, removeVisibilityFromMethodSignature(apiDiff));
}
java
public String setStringValue(String value) throws ControlException {
    String v = null;
    if (type != V4L4JConstants.CTRL_TYPE_STRING)
        throw new UnsupportedMethod("This control is not a string control");

    if (value.length() > max)
        throw new ControlException("The new string value for this control exceeds the maximum length");

    if (value.length() < min)
        throw new ControlException("The new string value for this control is below the minimum length");

    state.get();
    try {
        doSetStringValue(v4l4jObject, id, value);
        v = doGetStringValue(v4l4jObject, id);
    } finally {
        state.put();
    }

    return v;
}
java
public DTMIterator createDTMIterator(int node) {
    // DescendantIterator iter = new DescendantIterator();
    DTMIterator iter = new org.apache.xpath.axes.OneStepIteratorForward(Axis.SELF);
    iter.setRoot(node, this);
    return iter;
    // return m_dtmManager.createDTMIterator(node);
}
python
def smallest_flagged(heap, row):
    """Search the heap for the smallest element that is still flagged.

    Parameters
    ----------
    heap: array of shape (3, n_samples, n_neighbors)
        The heaps to search

    row: int
        Which of the heaps to search

    Returns
    -------
    index: int
        The index of the smallest flagged element of the ``row``th heap,
        or -1 if no flagged elements remain in the heap.
    """
    ind = heap[0, row]
    dist = heap[1, row]
    flag = heap[2, row]

    min_dist = np.inf
    result_index = -1

    for i in range(ind.shape[0]):
        if flag[i] == 1 and dist[i] < min_dist:
            min_dist = dist[i]
            result_index = i

    if result_index >= 0:
        flag[result_index] = 0.0
        return int(ind[result_index])
    else:
        return -1
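A self-contained run on a tiny heap of shape (3, 1, 2): one row, two neighbor slots:

import numpy as np

heap = np.array([
    [[7.0, 3.0]],   # candidate indices
    [[0.9, 0.4]],   # distances
    [[1.0, 1.0]],   # flags (1 = still flagged)
])
print(smallest_flagged(heap, 0))  # -> 3, the closer flagged candidate; its flag is cleared
print(smallest_flagged(heap, 0))  # -> 7
print(smallest_flagged(heap, 0))  # -> -1, nothing flagged remains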
java
public static void notPositive(Long value, String message) {
    if (!validation)
        return;
    notNull(value);
    notNull(message);
    if (value > 0)
        throw new ParameterException(ErrorCode.POSITIVE, message);
}
java
public static void addRowTimes(Matrix matrix, long diag, long fromCol, long row, double factor) {
    long cols = matrix.getColumnCount();
    for (long col = fromCol; col < cols; col++) {
        matrix.setAsDouble(
                matrix.getAsDouble(row, col) - factor * matrix.getAsDouble(diag, col),
                row, col);
    }
}
python
def float(self, var, default=NOTSET):
    """
    :rtype: float
    """
    return self.get_value(var, cast=float, default=default)
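This mirrors django-environ's Env.float; assuming that package, a usage sketch:

import os
os.environ['TIMEOUT'] = '2.5'

import environ
env = environ.Env()
print(env.float('TIMEOUT'))               # -> 2.5
print(env.float('MISSING', default=1.0))  # -> 1.0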
java
@Override
public Set<String> getResourceNames(String dirName) {
    Set<String> resourceNames = new TreeSet<>();
    List<ResourceBrowser> list = new ArrayList<>();
    list.addAll(resourceInfoProviders);

    for (ResourceBrowser rsBrowser : list) {
        if (generatorRegistry.isPathGenerated(dirName)) {
            if (rsBrowser instanceof ResourceGenerator) {
                ResourceGenerator rsGeneratorBrowser = (ResourceGenerator) rsBrowser;
                if (rsGeneratorBrowser.getResolver().matchPath(dirName)) {
                    resourceNames.addAll(rsBrowser.getResourceNames(dirName));
                    break;
                }
            }
        } else {
            if (!(rsBrowser instanceof ResourceGenerator)) {
                resourceNames.addAll(rsBrowser.getResourceNames(dirName));
                break;
            }
        }
    }
    return resourceNames;
}
java
public GetIdentityPoolRolesResult withRoles(java.util.Map<String, String> roles) {
    setRoles(roles);
    return this;
}
python
def get_versions() -> FileVersionResult:
    """
    Search specific project files and extract versions to check.

    :return: A FileVersionResult object for reporting.
    """
    version_counter = Counter()
    versions_match = False
    version_str = None
    versions_discovered = OrderedDict()

    for version_obj in version_objects:
        discovered = version_obj.get_version()
        versions_discovered[version_obj.key_name] = discovered
        version_counter.update([discovered])

    if len(version_counter) == 1:
        versions_match = True
        version_str = list(version_counter.keys())[0]

    return FileVersionResult(
        uniform=versions_match,
        version_details=versions_discovered,
        version_result=version_str,
    )
java
public void stop() {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Stopping client");
    }

    if (!running.compareAndSet(true, false)) {
        return;
    }

    synchronized (connections) {
        // wake up all connections
        for (Connection conn : connections.values()) {
            conn.interrupt();
        }

        // wait until all connections are closed
        while (!connections.isEmpty()) {
            try {
                connections.wait();
            } catch (InterruptedException e) {
                // pass
            }
        }
    }
}
java
public static String getRequestMethod(HttpServletRequest req) {
    String method;
    if (req.getMethod() != null)
        method = req.getMethod().toUpperCase();
    else
        method = AuditEvent.TARGET_METHOD_GET;
    return method;
}
java
private static InputStream openSystemFile(String filename) throws FileNotFoundException {
    try {
        return new FileInputStream(filename);
    } catch (FileNotFoundException e) {
        // try with classloader
        String resname = File.separatorChar != '/' ? filename.replace(File.separatorChar, '/') : filename;
        ClassLoader cl = LoggingConfiguration.class.getClassLoader();
        InputStream result = cl.getResourceAsStream(resname);
        if (result != null) {
            return result;
        }
        // Sometimes, URLClassLoader does not work right. Try harder:
        URL u = cl.getResource(resname);
        if (u == null) {
            throw e;
        }
        try {
            URLConnection conn = u.openConnection();
            conn.setUseCaches(false);
            result = conn.getInputStream();
            if (result != null) {
                return result;
            }
        } catch (IOException x) {
            throw e; // Throw original error instead.
        }
        throw e;
    }
}
java
public InterceptorConfigurationBuilder before(Class<? extends AsyncInterceptor> before) {
    attributes.attribute(BEFORE).set(before);
    return this;
}
java
public IndexTerminal forProperty(String property) {
    IndexExpression ix = (IndexExpression) this.astNode;
    ix.setPropertyName(property);
    IndexTerminal ret = new IndexTerminal(ix);
    return ret;
}