Columns: language — string, 2 classes (java, python); func_code_string — string, 63 to 466k characters.
java
public void doPrevious(final Wave wave) {
    if (this.eventList != null && this.timeFrame > 0) {
        hideCurrent(this.eventList.get(this.timeFrame));
    }
}
java
public void setAuthorizationRules(java.util.Collection<AuthorizationRule> authorizationRules) {
    if (authorizationRules == null) {
        this.authorizationRules = null;
        return;
    }
    this.authorizationRules = new com.amazonaws.internal.SdkInternalList<AuthorizationRule>(authorizationRules);
}
java
public void marshall(BatchDetachTypedLinkResponse batchDetachTypedLinkResponse, ProtocolMarshaller protocolMarshaller) {
    if (batchDetachTypedLinkResponse == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        // this response type has no members to marshall
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
@Override
public void eUnset(int featureID) {
    switch (featureID) {
        case AfplibPackage.CRC_RESOURCE_MANAGEMENT__FMT_QUAL:
            setFmtQual(FMT_QUAL_EDEFAULT);
            return;
        case AfplibPackage.CRC_RESOURCE_MANAGEMENT__RM_VALUE:
            setRMValue(RM_VALUE_EDEFAULT);
            return;
        case AfplibPackage.CRC_RESOURCE_MANAGEMENT__RES_CLASS_FLG:
            setResClassFlg(RES_CLASS_FLG_EDEFAULT);
            return;
    }
    super.eUnset(featureID);
}
java
private Object find(SearchComparator comp, Object searchKey) {
    SearchNode point = getSearchNode();
    Object ret = null;
    boolean gotit = optimisticFind(comp, searchKey, point);
    if (gotit == pessimisticNeeded)
        pessimisticFind(comp, searchKey, point);
    if (point.wasFound())
        ret = point.foundNode().key(point.foundIndex());
    return ret;
}
python
def get_self_url(request_data):
    """
    Returns the URL of the current host + current view + query.

    :param request_data: The request as a dict
    :type: dict

    :return: The url of current host + current view + query
    :rtype: string
    """
    self_url_host = OneLogin_Saml2_Utils.get_self_url_host(request_data)

    request_uri = ''
    if 'request_uri' in request_data:
        request_uri = request_data['request_uri']
        if not request_uri.startswith('/'):
            match = re.search('^https?://[^/]*(/.*)', request_uri)
            if match is not None:
                request_uri = match.groups()[0]

    return self_url_host + request_uri
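A minimal, runnable sketch of just the request_uri normalization step performed above, using a hypothetical host value in place of what OneLogin_Saml2_Utils.get_self_url_host would return:

import re

self_url_host = 'https://sp.example.com'  # hypothetical host
request_uri = 'https://sp.example.com/sso/acs?x=1'

# strip scheme and host when an absolute URL is passed in
if not request_uri.startswith('/'):
    match = re.search('^https?://[^/]*(/.*)', request_uri)
    if match is not None:
        request_uri = match.groups()[0]

print(self_url_host + request_uri)  # https://sp.example.com/sso/acs?x=1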
java
@Override
public DBCluster modifyDBCluster(ModifyDBClusterRequest request) {
    request = beforeClientExecution(request);
    return executeModifyDBCluster(request);
}
java
@XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "segments") public JAXBElement<CurveSegmentArrayPropertyType> createSegments(CurveSegmentArrayPropertyType value) { return new JAXBElement<CurveSegmentArrayPropertyType>(_Segments_QNAME, CurveSegmentArrayPropertyType.class, null, value); }
java
@XmlElementDecl(namespace = "http://www.w3.org/ns/prov#", name = "actedOnBehalfOf") public JAXBElement<ActedOnBehalfOf> createActedOnBehalfOf(ActedOnBehalfOf value) { return new JAXBElement<ActedOnBehalfOf>(_ActedOnBehalfOf_QNAME, ActedOnBehalfOf.class, null, value); }
python
def _get_systemd_services(root):
    '''
    Use os.listdir() to get all the unit files
    '''
    ret = set()
    for path in SYSTEM_CONFIG_PATHS + (LOCAL_CONFIG_PATH,):
        # Make sure user has access to the path, and if the path is a
        # link it's likely that another entry in SYSTEM_CONFIG_PATHS
        # or LOCAL_CONFIG_PATH points to it, so we can ignore it.
        path = _root(path, root)
        if os.access(path, os.R_OK) and not os.path.islink(path):
            for fullname in os.listdir(path):
                try:
                    unit_name, unit_type = fullname.rsplit('.', 1)
                except ValueError:
                    continue
                if unit_type in VALID_UNIT_TYPES:
                    ret.add(unit_name if unit_type == 'service' else fullname)
    return ret
python
def sample(self, pmf=None, replace=True):
    """Sample an item from the strata

    Parameters
    ----------
    pmf : array-like, shape=(n_strata,), optional, default None
        probability distribution to use when sampling from the strata.
        If not given, use the stratum weights.

    replace : bool, optional, default True
        whether to sample with replacement

    Returns
    -------
    loc : int
        location of the randomly selected item in the original input array

    stratum_idx : int
        the stratum index that was sampled from
    """
    stratum_idx = self._sample_stratum(pmf, replace=replace)
    loc = self._sample_in_stratum(stratum_idx, replace=replace)
    return loc, stratum_idx
java
private void changePage(final int direction) {
    int currentPage = pages.indexOf(cardManager.getVisible());
    currentPage = Math.min(2, Math.max(0, currentPage + direction));
    cardManager.makeVisible(pages.get(currentPage));
    prevButton.setDisabled(currentPage == 0);
    nextButton.setDisabled(currentPage == 2);
    finishButton.setDisabled(currentPage != 2);
    cancelButton.setUnsavedChanges(currentPage > 0);
}
java
public MultiLineString toMultiLineStringFromList(List<List<LatLng>> polylineList) {
    return toMultiLineStringFromList(polylineList, false, false);
}
java
private long finalizeFlv() {
    long bytesTransferred = 0L;
    if (!finalized.get()) {
        log.debug("Finalizing {}", filePath);
        try {
            // read file info if it exists
            File tmpFile = new File(filePath + ".info");
            if (tmpFile.exists()) {
                int[] info = readInfoFile(tmpFile);
                if (audioCodecId == -1 && info[0] > 0) {
                    audioCodecId = info[0];
                }
                if (videoCodecId == -1 && info[1] > 0) {
                    videoCodecId = info[1];
                }
                if (duration == 0 && info[2] > 0) {
                    duration = info[2];
                }
                if (audioDataSize == 0 && info[3] > 0) {
                    audioDataSize = info[3];
                }
                if (soundRate == 0 && info[4] > 0) {
                    soundRate = info[4];
                }
                if (soundSize == 0 && info[5] > 0) {
                    soundSize = info[5];
                }
                if (!soundType && info[6] > 0) {
                    soundType = true;
                }
                if (videoDataSize == 0 && info[7] > 0) {
                    videoDataSize = info[7];
                }
            } else {
                log.debug("Flv info file not found");
            }
            tmpFile = null;
            // write the file header
            writeHeader();
            log.debug("Pos post header: {}", fileChannel.position());
            // write the metadata with the final duration
            writeMetadataTag(duration * 0.001d, videoCodecId, audioCodecId);
            log.debug("Pos post meta: {}", fileChannel.position());
            // create a transfer buffer
            ByteBuffer dst = ByteBuffer.allocate(1024);
            // when appending, read original stream data first and put it at the front
            if (append) {
                Path prevFlv = Paths.get(filePath.replace(".flv", ".old"));
                if (Files.exists(prevFlv)) {
                    log.debug("Found previous flv: {} offset: {}", prevFlv, appendOffset);
                    SeekableByteChannel prevChannel = Files.newByteChannel(prevFlv, StandardOpenOption.READ);
                    // skip the flv header, prev tag size, and possibly metadata
                    prevChannel.position(appendOffset);
                    int read = -1, wrote;
                    boolean showfirsttag = true;
                    do {
                        read = prevChannel.read(dst);
                        log.trace("Read: {} bytes", read);
                        if (read > 0) {
                            dst.flip();
                            // inspect the byte to make sure it's a valid type
                            if (showfirsttag) {
                                showfirsttag = false;
                                dst.mark();
                                log.debug("Tag type: {}", (dst.get() & 31));
                                dst.reset();
                            }
                            wrote = fileChannel.write(dst);
                            log.trace("Wrote: {} bytes", wrote);
                            bytesTransferred += wrote;
                        }
                        dst.compact();
                    } while (read > 0);
                    dst.clear();
                    prevChannel.close();
                    // remove the previous flv
                    Files.deleteIfExists(prevFlv);
                    log.debug("Previous FLV bytes written: {} final position: {}", (bytesWritten + bytesTransferred), fileChannel.position());
                } else {
                    log.warn("Previous flv to be appended was not found: {}", prevFlv);
                }
            }
            // get starting position of the channel where latest stream data was written
            long pos = dataChannel.position();
            log.trace("Data available: {} bytes", pos);
            // set the data file to the beginning
            dataChannel.position(0L);
            // transfer / write data file into final flv
            int read = -1, wrote;
            do {
                read = dataChannel.read(dst);
                log.trace("Read: {} bytes", read);
                if (read > 0) {
                    dst.flip();
                    wrote = fileChannel.write(dst);
                    log.trace("Wrote: {} bytes", wrote);
                    bytesTransferred += wrote;
                }
                dst.compact();
            } while (read > 0);
            dst.clear();
            dataChannel.close();
            // get final position
            long length = fileChannel.position();
            // close the file
            fileChannel.close();
            // close and remove the ser file if write was successful
            if (bytesTransferred > 0) {
                if (!Files.deleteIfExists(Paths.get(filePath + ".info"))) {
                    log.warn("FLV info file not deleted");
                }
                if (!Files.deleteIfExists(Paths.get(filePath + ".ser"))) {
                    log.warn("FLV serial file not deleted");
                }
            }
            log.debug("FLV bytes written: {} final position: {}", (bytesWritten + bytesTransferred), length);
        } catch (Exception e) {
            log.warn("Finalization of flv file failed; new finalize job will be spawned", e);
        } finally {
            finalized.compareAndSet(false, true);
            // check for post processors that may be available
            if (FLVWriter.flv != null) {
                LinkedList<Class<IPostProcessor>> writePostProcessors = ((FLV) FLVWriter.flv).getWritePostProcessors();
                if (writePostProcessors != null) {
                    for (Class<IPostProcessor> postProcessor : writePostProcessors) {
                        try {
                            addPostProcessor(postProcessor.newInstance());
                        } catch (Exception e) {
                            log.warn("Post processor: {} instance creation failed", postProcessor, e);
                        }
                    }
                }
            }
            // run post process
            if (postProcessors != null) {
                for (IPostProcessor postProcessor : postProcessors) {
                    log.debug("Execute: {}", postProcessor);
                    try {
                        // set properties that the post processor requires or may require
                        postProcessor.init(filePath);
                        // execute and block
                        executor.submit(postProcessor).get();
                    } catch (Throwable t) {
                        log.warn("Exception during post process on: {}", filePath, t);
                    }
                }
                postProcessors.clear();
            } else {
                log.debug("No post processors configured");
            }
        }
    } else {
        log.trace("Finalization already completed");
    }
    return bytesTransferred;
}
python
def fastrcnn_outputs(feature, num_classes, class_agnostic_regression=False):
    """
    Args:
        feature (any shape):
        num_classes(int): num_category + 1
        class_agnostic_regression (bool): if True, regression to N x 1 x 4

    Returns:
        cls_logits: N x num_class classification logits
        reg_logits: N x num_classx4 or Nx2x4 if class agnostic
    """
    classification = FullyConnected(
        'class', feature, num_classes,
        kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    num_classes_for_box = 1 if class_agnostic_regression else num_classes
    box_regression = FullyConnected(
        'box', feature, num_classes_for_box * 4,
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))
    box_regression = tf.reshape(box_regression, (-1, num_classes_for_box, 4), name='output_box')
    return classification, box_regression
python
def wait(self, dwMilliseconds=None):
    """
    Waits for the next debug event.

    @see: L{cont}, L{dispatch}, L{loop}

    @type  dwMilliseconds: int
    @param dwMilliseconds: (Optional) Timeout in milliseconds.
        Use C{INFINITE} or C{None} for no timeout.

    @rtype:  L{Event}
    @return: An event that occurred in one of the debugees.

    @raise WindowsError: Raises an exception on error.
        If no target processes are left to debug,
        the error code is L{win32.ERROR_INVALID_HANDLE}.
    """
    # Wait for the next debug event.
    raw = win32.WaitForDebugEvent(dwMilliseconds)
    event = EventFactory.get(self, raw)

    # Remember it.
    self.lastEvent = event

    # Return it.
    return event
java
public void setPreparationThreads(int preparationThreads) {
    this.preparationThreads = preparationThreads;
    this.threadPool = java.util.concurrent.Executors.newFixedThreadPool(preparationThreads);
}
python
def MakeDeployableBinary(self, template_path, output_path):
    """Repackage the template zip with the installer."""
    context = self.context + ["Client Context"]

    zip_data = io.BytesIO()
    output_zip = zipfile.ZipFile(
        zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)

    z_template = zipfile.ZipFile(open(template_path, "rb"))

    # Track which files we've copied already.
    completed_files = [
        "grr-client.exe", "GRRservice.exe", "dbg_grr-client.exe",
        "dbg_GRRservice.exe"
    ]

    # Change the name of the main binary to the configured name.
    client_bin_name = config.CONFIG.Get("Client.binary_name", context=context)

    console_build = config.CONFIG.Get("ClientBuilder.console", context=context)
    if console_build:
        client_filename = "dbg_grr-client.exe"
        service_filename = "dbg_GRRservice.exe"
    else:
        client_filename = "grr-client.exe"
        service_filename = "GRRservice.exe"

    bin_name = z_template.getinfo(client_filename)
    output_zip.writestr(client_bin_name, z_template.read(bin_name))

    CopyFileInZip(z_template, "grr-client.exe.manifest", output_zip,
                  "%s.manifest" % client_bin_name)
    completed_files.append("grr-client.exe.manifest")

    # Change the name of the service binary to the configured name.
    service_template = z_template.getinfo(service_filename)

    service_bin_name = config.CONFIG.Get(
        "Nanny.service_binary_name", context=context)
    output_zip.writestr(service_bin_name, z_template.read(service_template))

    if config.CONFIG["Client.fleetspeak_enabled"]:
        self._GenerateFleetspeakServiceConfig(output_zip)

    if self.signed_template:
        # If the template libs were already signed we can skip signing
        CreateNewZipWithSignedLibs(
            z_template, output_zip, ignore_files=completed_files)
    else:
        CreateNewZipWithSignedLibs(
            z_template, output_zip, ignore_files=completed_files,
            signer=self.signer)

    output_zip.close()

    return self.MakeSelfExtractingZip(zip_data.getvalue(), output_path)
java
private double winkler(double totalScore, String S, String T) {
    totalScore = totalScore + (getPrefix(S, T) * 0.1 * (1.0 - totalScore));
    return totalScore;
}
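The method above applies the Winkler prefix bonus: score + prefix_length * 0.1 * (1 - score). A small Python sketch of the same arithmetic, assuming getPrefix returns the shared-prefix length capped at 4 (the usual Jaro-Winkler convention; the actual getPrefix implementation is not shown here):

def winkler(total_score, s, t, max_prefix=4):
    # common-prefix length, capped at max_prefix
    prefix = 0
    for a, b in zip(s[:max_prefix], t[:max_prefix]):
        if a != b:
            break
        prefix += 1
    return total_score + prefix * 0.1 * (1.0 - total_score)

print(winkler(0.9, 'martha', 'marhta'))  # prefix 3 -> 0.9 + 0.3 * 0.1 = 0.93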
java
private Iterable<String> retainExistingRoles(final Collection<String> roles) {
    final List<String> existingRoles = getRoles();
    existingRoles.retainAll(roles);
    return existingRoles;
}
python
def _generate_transformations(self, structure):
    """
    The central problem with trying to enumerate magnetic orderings is
    that we have to enumerate orderings that might plausibly be magnetic
    ground states, while not enumerating orderings that are physically
    implausible. The problem is that it is not always obvious by e.g.
    symmetry arguments alone which orderings to prefer. Here, we use a
    variety of strategies (heuristics) to enumerate plausible orderings,
    and later discard any duplicates that might be found by multiple
    strategies. This approach is not ideal, but has been found to be
    relatively robust over a wide range of magnetic structures.

    Args:
        structure: A sanitized input structure (_sanitize_input_structure)

    Returns:
        A dict of a transformation class instance (values) and name of
        enumeration strategy (keys)
    """
    formula = structure.composition.reduced_formula
    transformations = {}

    # analyzer is used to obtain information on sanitized input
    analyzer = CollinearMagneticStructureAnalyzer(
        structure,
        default_magmoms=self.default_magmoms,
        overwrite_magmom_mode="replace_all",
    )

    if not analyzer.is_magnetic:
        raise ValueError(
            "Not detected as magnetic, add a new default magmom for the "
            "element you believe may be magnetic?"
        )

    # now we can begin to generate our magnetic orderings
    self.logger.info("Generating magnetic orderings for {}".format(formula))

    mag_species_spin = analyzer.magnetic_species_and_magmoms
    types_mag_species = sorted(
        analyzer.types_of_magnetic_specie,
        key=lambda sp: analyzer.default_magmoms.get(str(sp), 0),
        reverse=True,
    )
    num_mag_sites = analyzer.number_of_magnetic_sites
    num_unique_sites = analyzer.number_of_unique_magnetic_sites()

    # enumerations become too slow as number of unique sites (and thus
    # permutations) increase, 8 is a soft limit, this can be increased
    # but do so with care
    if num_unique_sites > self.max_unique_sites:
        raise ValueError("Too many magnetic sites to sensibly perform enumeration.")

    # maximum cell size to consider: as a rule of thumb, if the primitive cell
    # contains a large number of magnetic sites, perhaps we only need to enumerate
    # within one cell, whereas on the other extreme if the primitive cell only
    # contains a single magnetic site, we have to create larger supercells
    if "max_cell_size" not in self.transformation_kwargs:
        # TODO: change to 8 / num_mag_sites ?
        self.transformation_kwargs["max_cell_size"] = max(1, int(4 / num_mag_sites))
    self.logger.info(
        "Max cell size set to {}".format(self.transformation_kwargs["max_cell_size"])
    )

    # when enumerating ferrimagnetic structures, it's useful to detect
    # symmetrically distinct magnetic sites, since different
    # local environments can result in different magnetic order
    # (e.g. inverse spinels)
    # initially, this was done by co-ordination number, but is
    # now done by a full symmetry analysis
    sga = SpacegroupAnalyzer(structure)
    structure_sym = sga.get_symmetrized_structure()
    wyckoff = ["n/a"] * len(structure)
    for indices, symbol in zip(
        structure_sym.equivalent_indices, structure_sym.wyckoff_symbols
    ):
        for index in indices:
            wyckoff[index] = symbol
    is_magnetic_sites = [
        True if site.specie in types_mag_species else False for site in structure
    ]
    # we're not interested in sites that we don't think are magnetic,
    # set these symbols to None to filter them out later
    wyckoff = [
        symbol if is_magnetic_site else "n/a"
        for symbol, is_magnetic_site in zip(wyckoff, is_magnetic_sites)
    ]
    structure.add_site_property("wyckoff", wyckoff)
    wyckoff_symbols = set(wyckoff) - {"n/a"}

    # if user doesn't specifically request ferrimagnetic_Cr2NiO4 orderings,
    # we apply a heuristic as to whether to attempt them or not
    if self.automatic:
        if (
            "ferrimagnetic_by_motif" not in self.strategies
            and len(wyckoff_symbols) > 1
            and len(types_mag_species) == 1
        ):
            self.strategies += ("ferrimagnetic_by_motif",)

        if (
            "antiferromagnetic_by_motif" not in self.strategies
            and len(wyckoff_symbols) > 1
            and len(types_mag_species) == 1
        ):
            self.strategies += ("antiferromagnetic_by_motif",)

        if (
            "ferrimagnetic_by_species" not in self.strategies
            and len(types_mag_species) > 1
        ):
            self.strategies += ("ferrimagnetic_by_species",)

    # we start with a ferromagnetic ordering
    if "ferromagnetic" in self.strategies:
        # TODO: remove 0 spins !
        fm_structure = analyzer.get_ferromagnetic_structure()
        # store magmom as spin property, to be consistent with output from
        # other transformations
        fm_structure.add_spin_by_site(fm_structure.site_properties["magmom"])
        fm_structure.remove_site_property("magmom")

        # we now have our first magnetic ordering...
        self.ordered_structures.append(fm_structure)
        self.ordered_structure_origins.append("fm")

    # we store constraint(s) for each strategy first,
    # and then use each to perform a transformation later
    all_constraints = {}

    # ...to which we can add simple AFM cases first...
    if "antiferromagnetic" in self.strategies:
        constraint = MagOrderParameterConstraint(
            0.5,
            # TODO: update MagOrderParameterConstraint in pymatgen to take
            # types_mag_species directly
            species_constraints=list(map(str, types_mag_species)),
        )
        all_constraints["afm"] = [constraint]

        # allows for non-magnetic sublattices
        if len(types_mag_species) > 1:
            for sp in types_mag_species:
                constraints = [
                    MagOrderParameterConstraint(0.5, species_constraints=str(sp))
                ]
                all_constraints["afm_by_{}".format(sp)] = constraints

    # ...and then we also try ferrimagnetic orderings by motif if a
    # single magnetic species is present...
    if "ferrimagnetic_by_motif" in self.strategies and len(wyckoff_symbols) > 1:
        # these orderings are AFM on one local environment, and FM on the rest
        for symbol in wyckoff_symbols:
            constraints = [
                MagOrderParameterConstraint(
                    0.5, site_constraint_name="wyckoff", site_constraints=symbol
                ),
                MagOrderParameterConstraint(
                    1.0,
                    site_constraint_name="wyckoff",
                    site_constraints=list(wyckoff_symbols - {symbol}),
                ),
            ]
            all_constraints["ferri_by_motif_{}".format(symbol)] = constraints

    # and also try ferrimagnetic when there are multiple magnetic species
    if "ferrimagnetic_by_species" in self.strategies:
        sp_list = [str(site.specie) for site in structure]
        num_sp = {sp: sp_list.count(str(sp)) for sp in types_mag_species}
        total_mag_sites = sum(num_sp.values())

        for sp in types_mag_species:
            # attempt via a global order parameter
            all_constraints["ferri_by_{}".format(sp)] = num_sp[sp] / total_mag_sites

            # attempt via afm on sp, fm on remaining species
            constraints = [
                MagOrderParameterConstraint(0.5, species_constraints=str(sp)),
                MagOrderParameterConstraint(
                    1.0,
                    species_constraints=list(map(str, set(types_mag_species) - {sp})),
                ),
            ]
            all_constraints["ferri_by_{}_afm".format(sp)] = constraints

    # ...and finally, we can try orderings that are AFM on one local
    # environment, and non-magnetic on the rest -- this is less common
    # but unless explicitly attempted, these states are unlikely to be found
    if "antiferromagnetic_by_motif" in self.strategies:
        for symbol in wyckoff_symbols:
            constraints = [
                MagOrderParameterConstraint(
                    0.5, site_constraint_name="wyckoff", site_constraints=symbol
                )
            ]
            all_constraints["afm_by_motif_{}".format(symbol)] = constraints

    # and now construct all our transformations for each strategy
    transformations = {}
    for name, constraints in all_constraints.items():
        trans = MagOrderingTransformation(
            mag_species_spin,
            order_parameter=constraints,
            **self.transformation_kwargs
        )
        transformations[name] = trans

    return transformations
python
def update_confirmation_comment(self, confirmation_comment_id, confirmation_comment_dict):
    """
    Updates a confirmation comment

    :param confirmation_comment_id: the confirmation comment id
    :param confirmation_comment_dict: dict
    :return: dict
    """
    return self._create_put_request(
        resource=CONFIRMATION_COMMENTS,
        billomat_id=confirmation_comment_id,
        send_data=confirmation_comment_dict
    )
python
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Returns the ``metric`` for ``unique_identifier`` segmented by week
    starting from ``from_date``

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of weeks to retrieve starting from ``from_date``
    """
    conn = kwargs.get("connection", None)
    closest_monday_from_date = self._get_closest_week(from_date)

    metric_key_date_range = self._get_weekly_date_range(
        closest_monday_from_date, datetime.timedelta(weeks=limit))
    date_generator = (closest_monday_from_date + datetime.timedelta(days=i)
                      for i in itertools.count(step=7))
    # generate a list of mondays in between the start date and the end date
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]

    metric_func = lambda conn: [
        conn.hmget(self._get_weekly_metric_key(unique_identifier, metric_key_date), metric_keys)
        for metric_key_date in metric_key_date_range]

    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)

    series, results = self._parse_and_process_metrics(series, results)

    return series, results
python
def gfstep(time):
    """
    Return the time step set by the most recent call to :func:`gfsstp`.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfstep_c.html

    :param time: Ignored ET value.
    :type time: float
    :return: Time step to take.
    :rtype: float
    """
    time = ctypes.c_double(time)
    step = ctypes.c_double()
    libspice.gfstep_c(time, ctypes.byref(step))
    return step.value
java
public int getStatus() {
    Integer intStatus = (Integer) this.getFieldData(m_iStatusField);
    if (intStatus != null)
        return intStatus.intValue();
    return 0;
}
python
def roles(self, value):
    """
    Setter for **self.__roles** attribute.

    :param value: Attribute value.
    :type value: dict
    """
    if value is not None:
        assert type(value) is dict, "'{0}' attribute: '{1}' type is not 'dict'!".format("roles", value)
        for key in value:
            assert type(key) is Qt.ItemDataRole, "'{0}' attribute: '{1}' type is not 'Qt.ItemDataRole'!".format(
                "roles", key)
    self.__roles = value
java
public void setTypeResolver(RelationshipResolver resolver, Class<?> type) {
    if (resolver != null) {
        String typeName = ReflectionUtils.getTypeName(type);
        if (typeName != null) {
            typedResolvers.put(type, resolver);
        }
    }
}
python
def _axis_properties(self, axis, title_size, title_offset, label_angle,
                     label_align, color):
    """Assign axis properties"""
    if self.axes:
        axis = [a for a in self.axes if a.scale == axis][0]
        self._set_axis_properties(axis)
        self._set_all_axis_color(axis, color)
        if title_size:
            axis.properties.title.font_size = ValueRef(value=title_size)
        if label_angle:
            axis.properties.labels.angle = ValueRef(value=label_angle)
        if label_align:
            axis.properties.labels.align = ValueRef(value=label_align)
        if title_offset:
            axis.properties.title.dy = ValueRef(value=title_offset)
    else:
        raise ValueError('This Visualization has no axes!')
java
public static void centerDialog(JDialog dialog) {
    Dimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();
    Dimension frameSize = dialog.getSize();
    dialog.setLocation(screenSize.width / 2 - frameSize.width / 2,
            screenSize.height / 2 - frameSize.height / 2);
    // note: re-enabling platform placement here lets the window manager
    // override the manual centering when the dialog is first shown
    dialog.setLocationByPlatform(true);
}
java
private boolean performFlushIfRequired() throws IOException {
    if (anyAreSet(state, FLUSHING_BUFFER)) {
        final ByteBuffer[] bufs = new ByteBuffer[additionalBuffer == null ? 1 : 2];
        long totalLength = 0;
        bufs[0] = currentBuffer.getBuffer();
        totalLength += bufs[0].remaining();
        if (additionalBuffer != null) {
            bufs[1] = additionalBuffer;
            totalLength += bufs[1].remaining();
        }
        if (totalLength > 0) {
            long total = 0;
            long res = 0;
            do {
                res = next.write(bufs, 0, bufs.length);
                total += res;
                if (res == 0) {
                    return false;
                }
            } while (total < totalLength);
        }
        additionalBuffer = null;
        currentBuffer.getBuffer().clear();
        state = state & ~FLUSHING_BUFFER;
    }
    return true;
}
java
protected String createCanonicalRequest(SignableRequest<?> request, String contentSha256) {
    /* This would url-encode the resource path for the first time. */
    final String path = SdkHttpUtils.appendUri(
            request.getEndpoint().getPath(), request.getResourcePath());

    final StringBuilder canonicalRequestBuilder = new StringBuilder(request
            .getHttpMethod().toString());

    canonicalRequestBuilder.append(LINE_SEPARATOR)
            // This would optionally double url-encode the resource path
            .append(getCanonicalizedResourcePath(path, doubleUrlEncode))
            .append(LINE_SEPARATOR)
            .append(getCanonicalizedQueryString(request))
            .append(LINE_SEPARATOR)
            .append(getCanonicalizedHeaderString(request))
            .append(LINE_SEPARATOR)
            .append(getSignedHeadersString(request))
            .append(LINE_SEPARATOR)
            .append(contentSha256);

    final String canonicalRequest = canonicalRequestBuilder.toString();

    if (log.isDebugEnabled())
        log.debug("AWS4 Canonical Requests: '\"" + canonicalRequest + "\"");

    return canonicalRequest;
}
python
def _reverse_convert(x, factor1, factor2):
    """
    Converts mixing ratio x in c1 - c2 tie line to that in
    comp1 - comp2 tie line.

    Args:
        x (float): Mixing ratio x in c1 - c2 tie line, a float between
            0 and 1.
        factor1 (float): Compositional ratio between composition c1 and
            processed composition comp1. E.g., factor for
            Composition('SiO2') and Composition('O') is 2.
        factor2 (float): Compositional ratio between composition c2 and
            processed composition comp2.

    Returns:
        Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.
    """
    return x * factor1 / ((1 - x) * factor2 + x * factor1)
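A worked example of the conversion above, reusing the docstring's SiO2/O case for factor1; factor2 = 1 is an assumed value for illustration:

# x = 0.5 on the c1 - c2 tie line, factor1 = 2 (SiO2 vs O), factor2 = 1
x, factor1, factor2 = 0.5, 2, 1
print(x * factor1 / ((1 - x) * factor2 + x * factor1))  # 0.666... on comp1 - comp2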
python
def delete(self, id):
    '''Delete a single item with the given ID'''
    if not self._item_path:
        raise AttributeError('delete is not available for %s' % self._item_name)
    target = self._item_path % id
    self._redmine.delete(target)
    return None
python
def bingham_mean(dec=None, inc=None, di_block=None):
    """
    Calculates the Bingham mean and associated statistical parameters from
    either a list of declination values and a separate list of inclination
    values or from a di_block (a nested list of [dec, inc, 1.0]). Returns
    a dictionary with the Bingham mean and statistical parameters.

    Parameters
    ----------
    dec: list of declinations
    inc: list of inclinations

    or

    di_block: a nested list of [dec, inc, 1.0]

    A di_block can be provided instead of dec, inc lists in which case it
    will be used. Either dec, inc lists or a di_block need to be passed
    to the function.

    Returns
    ---------
    bpars : dictionary containing the Bingham mean and associated statistics.

    Examples
    --------
    Use lists of declination and inclination to calculate a Bingham mean:

    >>> ipmag.bingham_mean(dec=[140,127,142,136], inc=[21,23,19,22])
    {'Edec': 220.84075754194598,
     'Einc': -13.745780972597291,
     'Eta': 9.9111522306938742,
     'Zdec': 280.38894136954474,
     'Zeta': 9.8653370276451113,
     'Zinc': 64.23509410796224,
     'dec': 136.32637167111312,
     'inc': 21.34518678073179,
     'n': 4}

    Use a di_block to calculate a Bingham mean (will give the same output
    as the example with the lists):

    >>> ipmag.bingham_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
    """
    if di_block is None:
        di_block = make_di_block(dec, inc)
        return pmag.dobingham(di_block)
    else:
        return pmag.dobingham(di_block)
java
private void send(ByteBuffer data) {
    try {
        this.socket.send(new DatagramPacket(
                data.array(),
                data.capacity(),
                this.address));
    } catch (IOException ioe) {
        logger.info("Error sending datagram packet to tracker at {}: {}.",
                this.address, ioe.getMessage());
    }
}
python
def process_spider_output(self, response, result, spider):
    '''
    Ensures the meta data from the response is passed
    through in any Requests generated from the spider
    '''
    self.logger.debug("processing meta passthrough middleware")
    for x in result:
        # only operate on requests
        if isinstance(x, Request):
            self.logger.debug("found request")
            # pass along all known meta fields, only if
            # they were not already set in the spider's new request
            for key in list(response.meta.keys()):
                if key not in x.meta:
                    x.meta[key] = response.meta[key]
        yield x
python
def stop_choose(self):
    """Marks the item as the one the user is not in."""
    if self.choosed:
        self.choosed = False
        self.pos = self.pos + Sep(-5, 0)
python
def duplicate_items(*collections):
    """Search for duplicate items in all collections.

    Examples
    --------
    >>> duplicate_items([1, 2], [3])
    set()
    >>> duplicate_items({1: 'a', 2: 'a'})
    set()
    >>> duplicate_items(['a', 'b', 'a'])
    {'a'}
    >>> duplicate_items([1, 2], {3: 'hi', 4: 'ha'}, (2, 3))
    {2, 3}
    """
    duplicates = set()
    seen = set()
    for item in flatten(collections):
        if item in seen:
            duplicates.add(item)
        else:
            seen.add(item)
    return duplicates
java
private void fillBuffer() throws IOException {
    if (!endOfInput && (lastCoderResult == null || lastCoderResult.isUnderflow())) {
        encoderIn.compact();
        int position = encoderIn.position();
        // We don't use Reader#read(CharBuffer) here because it is more efficient
        // to write directly to the underlying char array (the default implementation
        // copies data to a temporary char array).
        int c = reader.read(encoderIn.array(), position, encoderIn.remaining());
        if (c == -1) {
            endOfInput = true;
        } else {
            encoderIn.position(position + c);
        }
        encoderIn.flip();
    }
    encoderOut.compact();
    lastCoderResult = encoder.encode(encoderIn, encoderOut, endOfInput);
    encoderOut.flip();
}
python
def latencies(self):
    """List[Tuple[:class:`int`, :class:`float`]]: A list of latencies between a HEARTBEAT and a HEARTBEAT_ACK in seconds.

    This returns a list of tuples with elements ``(shard_id, latency)``.
    """
    return [(shard_id, shard.ws.latency) for shard_id, shard in self.shards.items()]
java
@Override
public void onNDArray(INDArray arr) {
    try (AeronNDArrayPublisher publisher = AeronNDArrayPublisher.builder()
            .streamId(streamId)
            .ctx(aeronContext)
            .channel(masterUrl)
            .build()) {
        publisher.publish(arr);
        log.debug("NDArray PublishingListener publishing to channel " + masterUrl + ":" + streamId);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
python
def awsRetry(f):
    """
    This decorator retries the wrapped function if aws throws unexpected errors.

    It should wrap any function that makes use of boto
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        for attempt in retry(delays=truncExpBackoff(),
                             timeout=300,
                             predicate=awsRetryPredicate):
            with attempt:
                return f(*args, **kwargs)
    return wrapper
java
public com.cloudant.client.api.model.Response removeAttachment(String id, String rev, String attachmentName) {
    Response couchDbResponse = db.removeAttachment(id, rev, attachmentName);
    com.cloudant.client.api.model.Response response =
            new com.cloudant.client.api.model.Response(couchDbResponse);
    return response;
}
python
def environ(on=os, **kw):
    """Update one or more environment variables.

    Preserves the previous environment variable (if available) and can be
    applied to remote connections that offer an @environ@ attribute using
    the @on@ argument.
    """
    # note: this is a generator, presumably decorated with
    # @contextlib.contextmanager upstream (decorator not shown here)
    originals = list()

    for key in kw:
        originals.append((key, on.environ.get(key, None)))
        on.environ[key] = kw[key]

    yield

    for key, value in originals:
        if not value:
            del on.environ[key]
            continue
        on.environ[key] = value
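Given the generator above, and assuming it is wrapped with contextlib.contextmanager upstream (a hedge, since the decorator is not shown in this snippet), usage would look like this sketch:

import contextlib
import os

environ_cm = contextlib.contextmanager(environ)  # hypothetical wrapping

with environ_cm(MY_FLAG='1'):
    assert os.environ['MY_FLAG'] == '1'
# the previous value (or absence) of MY_FLAG is restored on exit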
java
public List<Path> getExtraFilesDirectories() {
    return extraFilesDirectories == null
            ? null
            : extraFilesDirectories.stream().map(File::toPath).collect(Collectors.toList());
}
java
@Override
public void writeToParcel(Parcel dest, int flags) {
    dest.writeString(id);
    dest.writeString(alert);
    dest.writeString(url);
    dest.writeString(payload);
    dest.writeString(mid);
    dest.writeString(sound);
    dest.writeString(String.valueOf(bridge));
    dest.writeString(priority);
    dest.writeString(visibility);
    dest.writeString(redact);
    dest.writeString(category);
    dest.writeString(key);
    dest.writeString(gcmStyle);
    dest.writeString(iconName);
    dest.writeInt(notificationId);
    dest.writeString(lights);
    dest.writeString(messageType);
    dest.writeInt(hasTemplate);
}
python
def get_schema_from_list(table_name, frum):
    """
    SCAN THE LIST FOR COLUMN TYPES
    """
    columns = UniqueIndex(keys=("name",))
    _get_schema_from_list(frum, ".", parent=".", nested_path=ROOT_PATH, columns=columns)
    return Schema(table_name=table_name, columns=list(columns))
java
public Template parseTeaTemplate(String templateName) throws IOException {
    if (templateName == null) {
        return null;
    }
    preserveParseTree(templateName);
    compile(templateName);
    CompilationUnit unit = getCompilationUnit(templateName, null);
    if (unit == null) {
        return null;
    }
    return unit.getParseTree();
}
python
def send(self, api, force_send):
    """
    Send this item using api.

    :param api: D4S2Api sends messages to D4S2
    :param force_send: bool should we send even if the item already exists
    """
    item_id = self.get_existing_item_id(api)
    if not item_id:
        item_id = self.create_item_returning_id(api)
        api.send_item(self.destination, item_id, force_send)
    else:
        if force_send:
            api.send_item(self.destination, item_id, force_send)
        else:
            item_type = D4S2Api.DEST_TO_NAME.get(self.destination, "Item")
            msg = "{} already sent. Run with --resend argument to resend."
            raise D4S2Error(msg.format(item_type), warning=True)
java
public void marshall(ListTagsForStreamRequest listTagsForStreamRequest, ProtocolMarshaller protocolMarshaller) {
    if (listTagsForStreamRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(listTagsForStreamRequest.getNextToken(), NEXTTOKEN_BINDING);
        protocolMarshaller.marshall(listTagsForStreamRequest.getStreamARN(), STREAMARN_BINDING);
        protocolMarshaller.marshall(listTagsForStreamRequest.getStreamName(), STREAMNAME_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def expand_multirow_data(data):
    """
    Converts multirow cells to a list of lists and informs the number of
    lines of each row.

    Returns:
        tuple: new_data, row_heights
    """
    num_cols = len(data[0])  # number of columns

    # calculates row heights
    row_heights = []
    for mlrow in data:
        row_height = 0
        for j, cell in enumerate(mlrow):
            row_height = max(row_height,
                             1 if not isinstance(cell, (list, tuple)) else len(cell))
        row_heights.append(row_height)
    num_lines = sum(row_heights)  # line != row (rows are multiline)

    # rebuilds table data
    new_data = [[""] * num_cols for i in range(num_lines)]
    i0 = 0
    for row_height, mlrow in zip(row_heights, data):
        for j, cell in enumerate(mlrow):
            if not isinstance(cell, (list, tuple)):
                cell = [cell]
            for incr, x in enumerate(cell):
                new_data[i0 + incr][j] = x
        i0 += row_height

    return new_data, row_heights
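A small usage sketch of the function above on a two-column table whose second row holds a multiline cell:

data = [['name', 'lines'],
        ['a', ['x', 'y']]]
new_data, row_heights = expand_multirow_data(data)
# new_data    == [['name', 'lines'], ['a', 'x'], ['', 'y']]
# row_heights == [1, 2]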
python
def forcemigrate(app=None):
    """Force migrations to apply for a given app."""
    if app is None:
        abort("No app name given.")
    local("./manage.py migrate {} --fake".format(app))
    local("./manage.py migrate {}".format(app))
java
protected static MPResourceArray processMethodBulk(Class clazz, String methodName, String param1, Boolean useCache) throws MPException {
    HashMap<String, String> mapParams = new HashMap<String, String>();
    mapParams.put("param1", param1);
    return processMethodBulk(clazz, methodName, mapParams, useCache);
}
python
def _array_to_table(arr, rownames, colnames, n_digits):
    """Print an array with row and column names

    Example:
                  mean se_mean  sd 2.5%  25%  50%  75% 97.5% n_eff Rhat
        beta[1,1]  0.0     0.0 1.0 -2.0 -0.7  0.0  0.7   2.0  4000    1
        beta[1,2]  0.0     0.0 1.0 -2.1 -0.7  0.0  0.7   2.0  4000    1
        beta[2,1]  0.0     0.0 1.0 -2.0 -0.7  0.0  0.7   2.0  4000    1
        beta[2,2]  0.0     0.0 1.0 -1.9 -0.6  0.0  0.7   2.0  4000    1
        lp__      -4.2     0.1 2.1 -9.4 -5.4 -3.8 -2.7  -1.2   317    1
    """
    assert arr.shape == (len(rownames), len(colnames))
    rownames_maxwidth = max(len(n) for n in rownames)
    max_col_width = 7
    min_col_width = 5
    widths = [rownames_maxwidth] + [max(max_col_width, max(len(n) + 1, min_col_width))
                                    for n in colnames]
    header = '{:>{width}}'.format('', width=widths[0])
    for name, width in zip(colnames, widths[1:]):
        header += '{name:>{width}}'.format(name=name, width=width)
    lines = [header]
    for rowname, row in zip(rownames, arr):
        line = '{name:{width}}'.format(name=rowname, width=widths[0])
        for j, (num, width) in enumerate(zip(row, widths[1:])):
            if colnames[j] == 'n_eff':
                num = int(round(num, 0)) if not np.isnan(num) else num
            line += '{num:>{width}}'.format(
                num=_format_number(num, n_digits, max_col_width - 1), width=width)
        lines.append(line)
    return '\n'.join(lines)
java
public void write(DataOutput out) throws IOException {
    out.writeInt(bytesPerCRC);
    out.writeLong(crcPerBlock);
    md5.write(out);
}
java
public List<CmsJspNavElement> getNavigationForFolder(
    String folder,
    Visibility visibility,
    CmsResourceFilter resourceFilter) {

    folder = CmsFileUtil.removeTrailingSeparator(folder);
    List<CmsJspNavElement> result = new ArrayList<CmsJspNavElement>();
    List<CmsResource> resources = null;
    try {
        resources = m_cms.getResourcesInFolder(folder, resourceFilter);
    } catch (Exception e) {
        // should never happen
        LOG.error(e.getLocalizedMessage(), e);
    }
    if (resources == null) {
        return Collections.<CmsJspNavElement> emptyList();
    }
    boolean includeAll = visibility == Visibility.all;
    boolean includeHidden = visibility == Visibility.includeHidden;
    for (CmsResource r : resources) {
        CmsJspNavElement element = getNavigationForResource(m_cms.getSitePath(r), resourceFilter);
        if ((element != null)
            && (includeAll
                || (element.isInNavigation() && (includeHidden || !element.isHiddenNavigationEntry())))) {
            element.setNavContext(new NavContext(this, visibility, resourceFilter));
            result.add(element);
        }
    }
    Collections.sort(result);
    return result;
}
java
private static short readLEShort(InputStream in) throws IOException {
    // read two bytes little-endian; mask the low byte to avoid sign
    // extension corrupting the high byte when the two are OR-ed together
    int lo = in.read();
    int hi = in.read();
    return (short) ((hi << 8) | (lo & 0xFF));
}
java
public static String getTarballResourceNameFromUrl(String url) {
    String resourceName = url.substring(url.lastIndexOf('/') + 1);
    return resourceName;
}
java
public String removeResponseBaggage(String key) {
    if (BAGGAGE_ENABLE && key != null) {
        return responseBaggage.remove(key);
    }
    return null;
}
python
def fix_groups(groups):
    """Takes care of strange group numbers."""
    _groups = []
    for g in groups:
        try:
            if not float(g) > 0:
                _groups.append(1000)
            else:
                _groups.append(int(g))
        except TypeError as e:
            logging.info("Error in reading group number (check your db)")
            logging.debug(g)
            logging.debug(e)
            _groups.append(1000)
    return _groups
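A quick sketch of the fallback behavior above for mixed inputs (None triggers the TypeError branch; non-positive numbers also fall back to the 1000 sentinel):

print(fix_groups([1, '2', None, -3]))  # -> [1, 2, 1000, 1000]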
java
public void setDBEngineVersions(java.util.Collection<DBEngineVersion> dBEngineVersions) {
    if (dBEngineVersions == null) {
        this.dBEngineVersions = null;
        return;
    }
    this.dBEngineVersions = new java.util.ArrayList<DBEngineVersion>(dBEngineVersions);
}
java
public void setFormat(int formatElementIndex, Format newFormat) {
    int formatNumber = 0;
    for (int partIndex = 0; (partIndex = nextTopLevelArgStart(partIndex)) >= 0;) {
        if (formatNumber == formatElementIndex) {
            setCustomArgStartFormat(partIndex, newFormat);
            return;
        }
        ++formatNumber;
    }
    throw new ArrayIndexOutOfBoundsException(formatElementIndex);
}
java
public final EObject entryRuleNumberLiteral() throws RecognitionException {
    EObject current = null;

    EObject iv_ruleNumberLiteral = null;

    try {
        // InternalSimpleExpressions.g:469:2: (iv_ruleNumberLiteral= ruleNumberLiteral EOF )
        // InternalSimpleExpressions.g:470:2: iv_ruleNumberLiteral= ruleNumberLiteral EOF
        {
            newCompositeNode(grammarAccess.getNumberLiteralRule());
            pushFollow(FOLLOW_1);
            iv_ruleNumberLiteral = ruleNumberLiteral();

            state._fsp--;

            current = iv_ruleNumberLiteral;
            match(input, EOF, FOLLOW_2);
        }
    } catch (RecognitionException re) {
        recover(input, re);
        appendSkippedTokens();
    } finally {
    }
    return current;
}
java
public NewChunk asciiToUpper(NewChunk nc) {
    // copy existing data
    nc = this.extractRows(nc, 0, _len);
    // update offsets and byte array
    for (int i = 0; i < nc._sslen; i++) {
        if (nc._ss[i] > 0x60 && nc._ss[i] < 0x7B) // check for lowercase ascii letter
            nc._ss[i] -= 0x20; // upper it
    }
    return nc;
}
python
def remove_duplicate_edges_undirected(udg):
    """Removes duplicate edges from an undirected graph."""
    # With undirected edges, we need to hash both combinations of the
    # to-from node ids, since a-b and b-a are equivalent
    # --For aesthetics, we sort the edge ids so that lower edge ids are kept
    lookup = {}
    edges = sorted(udg.get_all_edge_ids())
    for edge_id in edges:
        e = udg.get_edge(edge_id)
        tpl_a = e['vertices']
        tpl_b = (tpl_a[1], tpl_a[0])
        if tpl_a in lookup or tpl_b in lookup:
            udg.delete_edge_by_id(edge_id)
        else:
            lookup[tpl_a] = edge_id
            lookup[tpl_b] = edge_id
python
def scheme_specification(cls):
    """ :meth:`.WSchemeHandler.scheme_specification` method implementation """
    return WSchemeSpecification(
        'file',
        WURIComponentVerifier(WURI.Component.path, WURIComponentVerifier.Requirement.optional)
    )
java
public void validateAttributesForm(AttributeSwapRequest attributeSwapRequest, MessageContext context) {
    final RequestContext requestContext = RequestContextHolder.getRequestContext();
    final ExternalContext externalContext = requestContext.getExternalContext();
    final Set<String> swappableAttributes =
            this.attributeSwapperHelper.getSwappableAttributes(externalContext);

    final Map<String, Attribute> currentAttributes = attributeSwapRequest.getCurrentAttributes();
    this.checkAttributesMap(context, "currentAttributes", swappableAttributes, currentAttributes);

    final Map<String, Attribute> attributesToCopy = attributeSwapRequest.getAttributesToCopy();
    this.checkAttributesMap(context, "attributesToCopy", swappableAttributes, attributesToCopy);
}
python
def mld_snooping_ipv6_pim_snooping_pimv6_enable(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    mld_snooping = ET.SubElement(config, "mld-snooping",
                                 xmlns="urn:brocade.com:mgmt:brocade-mld-snooping")
    ipv6 = ET.SubElement(mld_snooping, "ipv6")
    pim = ET.SubElement(ipv6, "pim")
    snooping = ET.SubElement(pim, "snooping")
    pimv6_enable = ET.SubElement(snooping, "pimv6-enable")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
java
private static int getLoggedinState(Context context, String uid) {
    Cursor cursor = context.getContentResolver().query(
            LOGGED_IN_URI.buildUpon().appendPath(uid).build(),
            null,  // projection
            null,  // selection clause
            null,  // selection args
            null); // sort order
    if (cursor == null) {
        // DropboxApp not installed
        return NO_USER;
    }
    cursor.moveToFirst();
    return cursor.getInt(cursor.getColumnIndex("logged_in"));
}
java
public ServiceFuture<Void> beginRefreshMemberSchemaAsync(String resourceGroupName, String serverName, String databaseName, String syncGroupName, String syncMemberName, final ServiceCallback<Void> serviceCallback) {
    return ServiceFuture.fromResponse(
            beginRefreshMemberSchemaWithServiceResponseAsync(
                    resourceGroupName, serverName, databaseName, syncGroupName, syncMemberName),
            serviceCallback);
}
python
def replay_scope(self, sess):
    """Enters a replay scope that unsets it at the end."""
    current_replay = self.replay(sess)
    try:
        self.set_replay(sess, True)
        yield
    finally:
        self.set_replay(sess, current_replay)
python
def _should_send_property(self, key, value):
    """Check the property lock (property_lock)"""
    to_json = self.trait_metadata(key, 'to_json', self._trait_to_json)
    if key in self._property_lock:
        # model_state, buffer_paths, buffers
        split_value = _remove_buffers({key: to_json(value, self)})
        split_lock = _remove_buffers({key: self._property_lock[key]})
        # A roundtrip conversion through json in the comparison takes care of
        # idiosyncrasies of how python data structures map to json, for example
        # tuples get converted to lists.
        if (jsonloads(jsondumps(split_value[0])) == split_lock[0]
                and split_value[1] == split_lock[1]
                and _buffer_list_equal(split_value[2], split_lock[2])):
            return False
    if self._holding_sync:
        self._states_to_send.add(key)
        return False
    else:
        return True
java
public static void setupClasspath(
    final Path distributedClassPath,
    final Path intermediateClassPath,
    final Job job
) throws IOException {
    String classpathProperty = System.getProperty("druid.hadoop.internal.classpath");
    if (classpathProperty == null) {
        classpathProperty = System.getProperty("java.class.path");
    }

    String[] jarFiles = classpathProperty.split(File.pathSeparator);

    final Configuration conf = job.getConfiguration();
    final FileSystem fs = distributedClassPath.getFileSystem(conf);

    if (fs instanceof LocalFileSystem) {
        return;
    }

    for (String jarFilePath : jarFiles) {
        final File jarFile = new File(jarFilePath);
        if (jarFile.getName().endsWith(".jar")) {
            try {
                RetryUtils.retry(
                    () -> {
                        if (isSnapshot(jarFile)) {
                            addSnapshotJarToClassPath(jarFile, intermediateClassPath, fs, job);
                        } else {
                            addJarToClassPath(jarFile, distributedClassPath, intermediateClassPath, fs, job);
                        }
                        return true;
                    },
                    shouldRetryPredicate(),
                    NUM_RETRIES
                );
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }
}
python
def add_number_mapping(self, abi, number, name):
    """
    Associate a syscall number with the name of a function present in the
    underlying SimLibrary

    :param abi:     The abi for which this mapping applies
    :param number:  The syscall number
    :param name:    The name of the function
    """
    self.syscall_number_mapping[abi][number] = name
    self.syscall_name_mapping[abi][name] = number
python
def as_widget(self, widget=None, attrs=None, only_initial=False):
    """
    Renders the field.
    """
    attrs = attrs or {}
    attrs.update(self.form.get_widget_attrs(self))
    if hasattr(self.field, 'widget_css_classes'):
        css_classes = self.field.widget_css_classes
    else:
        css_classes = getattr(self.form, 'widget_css_classes', None)
    if css_classes:
        attrs.update({'class': css_classes})
    widget_classes = self.form.fields[self.name].widget.attrs.get('class', None)
    if widget_classes:
        if attrs.get('class', None):
            attrs['class'] += ' ' + widget_classes
        else:
            attrs.update({'class': widget_classes})
    return super(NgBoundField, self).as_widget(widget, attrs, only_initial)
python
def delete_branch(self, project, repository, name, end_point):
    """
    Delete branch from related repo
    :param self:
    :param project:
    :param repository:
    :param name:
    :param end_point:
    :return:
    """
    url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branches'.format(
        project=project, repository=repository)
    data = {"name": str(name), "endPoint": str(end_point)}
    return self.delete(url, data=data)
python
def get_pd_by_name(self, name):
    """
    Get ScaleIO ProtectionDomain object by its name
    :param name: Name of ProtectionDomain
    :return: ScaleIO ProtectionDomain object
    :raise KeyError: No ProtectionDomain with specified name found
    :rtype: ProtectionDomain object
    """
    for pd in self.conn.protection_domains:
        if pd.name == name:
            return pd
    raise KeyError("Protection Domain NAME " + name + " not found")
python
def connect(self,
            uri=None,
            cleansession=None,
            cafile=None,
            capath=None,
            cadata=None,
            extra_headers={}):
    """
    Connect to a remote broker.

    At first, a network connection is established with the server using the
    given protocol (``mqtt``, ``mqtts``, ``ws`` or ``wss``). Once the socket
    is connected, a `CONNECT <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718028>`_
    message is sent with the requested information.

    This method is a *coroutine*.

    :param uri: Broker URI connection, conforming to `MQTT URI scheme <https://github.com/mqtt/mqtt.github.io/wiki/URI-Scheme>`_. Uses ``uri`` config attribute by default.
    :param cleansession: MQTT CONNECT clean session flag
    :param cafile: server certificate authority file (optional, used for secured connection)
    :param capath: server certificate authority path (optional, used for secured connection)
    :param cadata: server certificate authority data (optional, used for secured connection)
    :param extra_headers: a dictionary with additional http headers that should be sent on the initial connection (optional, used only with websocket connections)
    :return: `CONNACK <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718033>`_ return code
    :raise: :class:`hbmqtt.client.ConnectException` if connection fails
    """
    self.session = self._initsession(uri, cleansession, cafile, capath, cadata)
    self.extra_headers = extra_headers
    self.logger.debug("Connect to: %s" % uri)

    try:
        return (yield from self._do_connect())
    except BaseException as be:
        self.logger.warning("Connection failed: %r" % be)
        auto_reconnect = self.config.get('auto_reconnect', False)
        if not auto_reconnect:
            raise
        else:
            return (yield from self.reconnect())
python
def update(self, d, ignore_errors=True, block_user_signals=False):
    """
    Supply a dictionary or databox with a header of the same format
    and see what happens! (Hint: it updates the existing values.)

    This will store non-existent key-value pairs in the dictionary
    self._lazy_load. When you add settings in the future, these will be
    checked for the default values.
    """
    if not type(d) == dict:
        d = d.headers

    # Update the lazy load
    self._lazy_load.update(d)

    # loop over the dictionary and update
    for k in list(self._lazy_load.keys()):
        # Only proceed if the parameter exists
        if not self._find_parameter(k.split('/'), False, True) == None:
            # Pop the value so it's not set again in the future
            v = self._lazy_load.pop(k)
            self._set_value_safe(k, v, ignore_errors, block_user_signals)

    return self
python
def remove_pattern(self, pattern):
    """
    Removes given pattern from the Model.

    :param pattern: Pattern.
    :type pattern: unicode
    :return: Method success.
    :rtype: bool
    """
    for index, node in enumerate(self.root_node.children):
        if node.name != pattern:
            continue

        LOGGER.debug("> Removing '{0}' at '{1}' index.".format(pattern, index))
        self.beginRemoveRows(self.get_node_index(self.root_node), index, index)
        pattern_node = self.root_node.child(index)
        self.root_node.remove_child(index)
        self.endRemoveRows()
        self.pattern_removed.emit(pattern_node)
        return True
java
public void postKey(long time, Key key, boolean pressed, char typedCh, int modFlags) {
    kevQueue.add(key == null
            ? new Keyboard.TypedEvent(modFlags, time, typedCh)
            : new Keyboard.KeyEvent(modFlags, time, key, pressed));
}
java
public static String captureStackTrace(Throwable e) {
    StringWriter buffer = new StringWriter();
    e.printStackTrace(new PrintWriter(buffer));
    return buffer.toString();
}
java
protected LightweightTypeReference getCommonSuperType(List<LightweightTypeReference> types, ITypeReferenceOwner owner) {
    return services.getTypeConformanceComputer().getCommonSuperType(types, owner);
}
java
@Override
@SuppressWarnings("unchecked")
public <T extends CMAResource> T setEnvironmentId(String environmentId) {
    return (T) setId(environmentId);
}
python
def refreshUi(self):
    """
    Load the plugin information to the interface.
    """
    dataSet = self.dataSet()
    if not dataSet:
        return False

    # lookup widgets based on the data set information
    for widget in self.findChildren(QWidget):
        prop = unwrapVariant(widget.property('dataName'))
        if prop is None:
            continue

        # update the data for the widget
        prop_name = nativestring(prop)
        if prop_name in dataSet:
            value = dataSet.value(prop_name)
            projexui.setWidgetValue(widget, value)

    return True
python
def my_info(self, access_token):
    """doc: http://open.youku.com/docs/doc?id=23
    """
    url = 'https://openapi.youku.com/v2/users/myinfo.json'
    data = {'client_id': self.client_id, 'access_token': access_token}
    r = requests.post(url, data=data)
    check_error(r)
    return r.json()
python
def owned_attributes(self):
    """
    Returns a list of attributes owned by this service.
    """
    attributes = self._get_object(self.update_api.attributes.owned)
    return [ExistOwnedAttributeResponse(attribute) for attribute in attributes]
java
@JsonValue
public Object toJsonValue() {
    if (flags != null)
        return flags;
    if (mode != null)
        return mode;
    return null;
}
java
public PipeBlobBuilder add(String name, Object value) {
    elements.add(PipeDataElement.newInstance(name, value));
    return this;
}
python
def set_key(self, key, modifiers: typing.List[Key]=None):
    """This is called when the user successfully finishes recording a key combination."""
    if modifiers is None:
        modifiers = []  # type: typing.List[Key]
    if key in self.KEY_MAP:
        key = self.KEY_MAP[key]
    self._setKeyLabel(key)
    self.key = key
    self.controlButton.setChecked(Key.CONTROL in modifiers)
    self.altButton.setChecked(Key.ALT in modifiers)
    self.shiftButton.setChecked(Key.SHIFT in modifiers)
    self.superButton.setChecked(Key.SUPER in modifiers)
    self.hyperButton.setChecked(Key.HYPER in modifiers)
    self.metaButton.setChecked(Key.META in modifiers)
    self.recording_finished.emit(True)
python
def dict_merge(base, addition, append_lists=False):
    """Merge one dictionary with another, recursively.
    Fields present in addition will be added to base if not present or
    merged if both values are dictionaries or lists (with
    append_lists=True). If the values are different data types, the value
    in addition will be discarded. No data from base is deleted or
    overwritten. This function does not modify either dictionary.
    Dictionaries inside of other container types (list, etc.) are not
    merged, as the rules for merging would be ambiguous.
    This utility could be expanded to merge Mapping and Container types
    in the future, but currently works only with dict and list.

    Arguments:
        base (dict): The dictionary being added to.
        addition (dict): The dictionary with additional data.
        append_lists (bool): When ``True``, fields present in base and
                addition that are lists will also be merged. Extra values
                from addition will be appended to the list in base.

    Returns:
        dict: The merged base.
    """
    if not isinstance(base, dict) or not isinstance(addition, dict):
        raise TypeError("dict_merge only works with dicts.")

    new_base = deepcopy(base)
    for key, value in addition.items():
        # Simplest case: Key not in base, so add value to base
        if key not in new_base.keys():
            new_base[key] = value
        # If the value is a dict, and base's value is also a dict, merge
        # If there is a type disagreement, merging cannot and should not happen
        if isinstance(value, dict) and isinstance(new_base[key], dict):
            new_base[key] = dict_merge(new_base[key], value)
        # If value is a list, lists should be merged, and base is compatible
        elif append_lists and isinstance(value, list) and isinstance(new_base[key], list):
            new_list = deepcopy(new_base[key])
            for item in value:
                if item not in new_list:
                    new_list.append(item)
            new_base[key] = new_list
        # If none of these trigger, discard value from addition implicitly

    return new_base
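A brief usage sketch of dict_merge with hypothetical data, showing recursive dict merging and list appending:

base = {'a': 1, 'nested': {'x': 1}, 'tags': ['red']}
addition = {'a': 99, 'nested': {'y': 2}, 'tags': ['blue']}
print(dict_merge(base, addition, append_lists=True))
# {'a': 1, 'nested': {'x': 1, 'y': 2}, 'tags': ['red', 'blue']}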
python
def load(path):
    """Loads a pushdb maintained in a properties file at the given path."""
    with open(path, 'r') as props:
        properties = Properties.load(props)
        return PushDb(properties)
java
private static int toSeleniumCoordinate(Object openCVCoordinate) {
    if (openCVCoordinate instanceof Long) {
        return ((Long) openCVCoordinate).intValue();
    }
    if (openCVCoordinate instanceof Double) {
        return ((Double) openCVCoordinate).intValue();
    }
    return (int) openCVCoordinate;
}
python
def append(self, row=None):
    """append(row=None)

    :param row:
        a list of values to apply to the newly appended row or :obj:`None`
    :type row: [:obj:`object`] or :obj:`None`
    :returns: :obj:`Gtk.TreeIter` of the appended row
    :rtype: :obj:`Gtk.TreeIter`

    If `row` is :obj:`None` the appended row will be empty and to fill in
    values you need to call :obj:`Gtk.ListStore.set`\\() or
    :obj:`Gtk.ListStore.set_value`\\().

    If `row` isn't :obj:`None` it has to be a list of values which will be
    used to fill the row.
    """
    if row:
        return self._do_insert(-1, row)
    # gtk_list_store_insert() does not know about the "position == -1"
    # case, so use append() here
    else:
        return Gtk.ListStore.append(self)
java
public DateTime toDomain(Date from) {
    if (from == null) {
        return null;
    }
    return new DateTime(from.getTime());
}
python
def _add_log2_depth(in_file, out_file, data):
    """Create a CNVkit cnn file with depths

    http://cnvkit.readthedocs.io/en/stable/fileformats.html?highlight=cnn#target-and-antitarget-bin-level-coverages-cnn
    """
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            with utils.open_gzipsafe(in_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    out_handle.write("chromosome\tstart\tend\tgene\tlog2\tdepth\n")
                    for line in in_handle:
                        parts = line.rstrip().split("\t")
                        if len(parts) > 4:
                            # Handle inputs unannotated with gene names
                            if len(parts) == 5:
                                chrom, start, end, orig_name, depth = parts
                                gene_name = orig_name if (orig_name in ["Antitarget", "Background"]) else "."
                            else:
                                assert len(parts) == 6, parts
                                chrom, start, end, orig_name, depth, gene_name = parts
                            depth = float(depth)
                            log2_depth = math.log(float(depth), 2) if depth else -20.0
                            out_handle.write("%s\t%s\t%s\t%s\t%.3f\t%.2f\n" %
                                             (chrom, start, end, gene_name, log2_depth, depth))
    return out_file
python
def application(tokens):
    """Matches function call (application)."""
    tokens = iter(tokens)
    func = next(tokens)
    paren = next(tokens)

    if func and func.name == "symbol" and paren.name == "lparen":
        # We would be able to unambiguously parse function application with
        # whitespace between the function name and the lparen, but let's not
        # do that because it's unexpected in most languages.
        if func.end != paren.start:
            raise errors.EfilterParseError(
                start=func.start, end=paren.end,
                message="No whitespace allowed between function and paren.")

        return common.TokenMatch(None, func.value, (func, paren))
java
protected void onItemLongClick(View view, int position) {
    if (itemLongClickListener != null)
        itemLongClickListener.onItemLongClick(view, getItem(position), position);
}
python
def destroy(self):
    """ Custom destructor that deletes the fragment and removes
    itself from the adapter it was added to.
    """
    #: Destroy fragment
    fragment = self.fragment
    if fragment:
        #: Stop listening
        fragment.setFragmentListener(None)

        #: Cleanup from fragment
        if self.adapter is not None:
            self.adapter.removeFragment(self.fragment)

        del self.fragment
    super(AndroidFragment, self).destroy()
java
public Geometry createGeometry(Geometry geometry) {
    if (geometry instanceof Point) {
        return createPoint(geometry.getCoordinate());
    } else if (geometry instanceof LinearRing) {
        return createLinearRing(geometry.getCoordinates());
    } else if (geometry instanceof LineString) {
        return createLineString(geometry.getCoordinates());
    } else if (geometry instanceof Polygon) {
        Polygon polygon = (Polygon) geometry;
        LinearRing exteriorRing = createLinearRing(polygon.getExteriorRing().getCoordinates());
        LinearRing[] interiorRings = new LinearRing[polygon.getNumInteriorRing()];
        for (int n = 0; n < polygon.getNumInteriorRing(); n++) {
            interiorRings[n] = createLinearRing(polygon.getInteriorRingN(n).getCoordinates());
        }
        return new Polygon(srid, precision, exteriorRing, interiorRings);
    } else if (geometry instanceof MultiPoint) {
        Point[] clones = new Point[geometry.getNumGeometries()];
        for (int n = 0; n < geometry.getNumGeometries(); n++) {
            clones[n] = createPoint(geometry.getGeometryN(n).getCoordinate());
        }
        return new MultiPoint(srid, precision, clones);
    } else if (geometry instanceof MultiLineString) {
        LineString[] clones = new LineString[geometry.getNumGeometries()];
        for (int n = 0; n < geometry.getNumGeometries(); n++) {
            clones[n] = createLineString(geometry.getGeometryN(n).getCoordinates());
        }
        return new MultiLineString(srid, precision, clones);
    } else if (geometry instanceof MultiPolygon) {
        Polygon[] clones = new Polygon[geometry.getNumGeometries()];
        for (int n = 0; n < geometry.getNumGeometries(); n++) {
            clones[n] = (Polygon) createGeometry(geometry.getGeometryN(n));
        }
        return new MultiPolygon(srid, precision, clones);
    }
    return null;
}